Merge branch 'yt-dlp:master' into pr/live-sections

@@ -12,7 +12,7 @@ from ..utils.traversal import traverse_obj
 class BoxIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/?#]+)(?:/file/(?P<id>\d+))?'
+    _VALID_URL = r'https?://(?:[^.]+\.)?(?P<service>app|ent)\.box\.com/s/(?P<shared_name>[^/?#]+)(?:/file/(?P<id>\d+))?'
     _TESTS = [{
         'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
         'md5': '1f81b2fd3960f38a40a3b8823e5fcd43',
@@ -38,10 +38,22 @@ class BoxIE(InfoExtractor):
             'uploader_id': '239068974',
         },
         'params': {'skip_download': 'dash fragment too small'},
+    }, {
+        'url': 'https://thejacksonlaboratory.ent.box.com/s/2x09dm6vcg6y28o0oox1so4l0t8wzt6l/file/1536173056065',
+        'info_dict': {
+            'id': '1536173056065',
+            'ext': 'mp4',
+            'uploader_id': '18523128264',
+            'uploader': 'Lexi Hennigan',
+            'title': 'iPSC Symposium recording part 1.mp4',
+            'timestamp': 1716228343,
+            'upload_date': '20240520',
+        },
+        'params': {'skip_download': 'dash fragment too small'},
     }]

     def _real_extract(self, url):
-        shared_name, file_id = self._match_valid_url(url).groups()
+        shared_name, file_id, service = self._match_valid_url(url).group('shared_name', 'id', 'service')
         webpage = self._download_webpage(url, file_id or shared_name)

         if not file_id:
@@ -57,14 +69,14 @@ class BoxIE(InfoExtractor):
             request_token = self._search_json(
                 r'Box\.config\s*=', webpage, 'Box config', file_id)['requestToken']
             access_token = self._download_json(
-                'https://app.box.com/app-api/enduserapp/elements/tokens', file_id,
+                f'https://{service}.box.com/app-api/enduserapp/elements/tokens', file_id,
                 'Downloading token JSON metadata',
                 data=json.dumps({'fileIDs': [file_id]}).encode(), headers={
                     'Content-Type': 'application/json',
                     'X-Request-Token': request_token,
                     'X-Box-EndUser-API': 'sharedName=' + shared_name,
                 })[file_id]['read']
-        shared_link = 'https://app.box.com/s/' + shared_name
+        shared_link = f'https://{service}.box.com/s/{shared_name}'
         f = self._download_json(
             'https://api.box.com/2.0/files/' + file_id, file_id,
             'Downloading file JSON metadata', headers={
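
The BoxIE change above threads a new `service` capture group (`app` or `ent`) from the share URL into the token endpoint and the shared link, so enterprise `ent.box.com` shares resolve against the right host. A minimal standalone sketch of that routing, using only the stdlib `re` module and the URLs from the diff's test cases (the variable names here are illustrative, not yt-dlp code):

import re

# The widened pattern from the diff: matches both app.box.com and ent.box.com shares
VALID_URL = (r'https?://(?:[^.]+\.)?(?P<service>app|ent)\.box\.com/s/'
             r'(?P<shared_name>[^/?#]+)(?:/file/(?P<id>\d+))?')

for url in (
    'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
    'https://thejacksonlaboratory.ent.box.com/s/2x09dm6vcg6y28o0oox1so4l0t8wzt6l/file/1536173056065',
):
    mobj = re.match(VALID_URL, url)
    service, shared_name, file_id = mobj.group('service', 'shared_name', 'id')
    # The extractor then builds per-service endpoints, mirroring the f-strings in the diff
    token_url = f'https://{service}.box.com/app-api/enduserapp/elements/tokens'
    shared_link = f'https://{service}.box.com/s/{shared_name}'
    print(service, shared_name, file_id)
    print(token_url, shared_link)
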
@@ -314,23 +314,11 @@ class SoundcloudBaseIE(InfoExtractor):
                     self.write_debug(f'"{identifier}" is not a requested format, skipping')
                     continue

-                stream = None
-                for retry in self.RetryManager(fatal=False):
-                    try:
-                        stream = self._call_api(
-                            format_url, track_id, f'Downloading {identifier} format info JSON',
-                            query=query, headers=self._HEADERS)
-                    except ExtractorError as e:
-                        if isinstance(e.cause, HTTPError) and e.cause.status == 429:
-                            self.report_warning(
-                                'You have reached the API rate limit, which is ~600 requests per '
-                                '10 minutes. Use the --extractor-retries and --retry-sleep options '
-                                'to configure an appropriate retry count and wait time', only_once=True)
-                            retry.error = e.cause
-                        else:
-                            self.report_warning(e.msg)
+                # XXX: if not extract_flat, 429 error must be caught where _extract_info_dict is called
+                stream_url = traverse_obj(self._call_api(
+                    format_url, track_id, f'Downloading {identifier} format info JSON',
+                    query=query, headers=self._HEADERS), ('url', {url_or_none}))

-                stream_url = traverse_obj(stream, ('url', {url_or_none}))
                 if invalid_url(stream_url):
                     continue
                 format_urls.add(stream_url)
@@ -647,7 +635,17 @@ class SoundcloudIE(SoundcloudBaseIE):
         info = self._call_api(
             info_json_url, full_title, 'Downloading info JSON', query=query, headers=self._HEADERS)

-        return self._extract_info_dict(info, full_title, token)
+        for retry in self.RetryManager():
+            try:
+                return self._extract_info_dict(info, full_title, token)
+            except ExtractorError as e:
+                if not isinstance(e.cause, HTTPError) or not e.cause.status == 429:
+                    raise
+                self.report_warning(
+                    'You have reached the API rate limit, which is ~600 requests per '
+                    '10 minutes. Use the --extractor-retries and --retry-sleep options '
+                    'to configure an appropriate retry count and wait time', only_once=True)
+                retry.error = e.cause


 class SoundcloudPlaylistBaseIE(SoundcloudBaseIE):
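
Taken together, the two SoundCloud hunks move the HTTP 429 retry from the per-format loop up to the single `_extract_info_dict` call, so one rate-limit hit no longer triggers a retry per format. A plain-Python sketch of that retry-at-the-top pattern (this is not yt-dlp's `RetryManager`; the exception class and sleep values are placeholders):

import time

class RateLimited(Exception):
    """Placeholder for an HTTP 429 error raised by the API client."""

def with_429_retry(extract, retries=3, sleep=10):
    # Retry the whole extraction when the API reports a rate limit,
    # instead of retrying every individual format request.
    for attempt in range(retries + 1):
        try:
            return extract()
        except RateLimited:
            if attempt == retries:
                raise
            print(f'Rate limited (HTTP 429), retrying in {sleep}s ({attempt + 1}/{retries})')
            time.sleep(sleep)

# Usage (hypothetical): with_429_retry(lambda: extract_info_dict(info, full_title, token))
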
@@ -1458,9 +1458,11 @@ class TikTokLiveIE(TikTokBaseIE):

         if webpage:
             data = self._get_sigi_state(webpage, uploader or room_id)
-            room_id = (traverse_obj(data, ('UserModule', 'users', ..., 'roomId', {str_or_none}), get_all=False)
-                       or self._search_regex(r'snssdk\d*://live\?room_id=(\d+)', webpage, 'room ID', default=None)
-                       or room_id)
+            room_id = (
+                traverse_obj(data, ((
+                    ('LiveRoom', 'liveRoomUserInfo', 'user'),
+                    ('UserModule', 'users', ...)), 'roomId', {str}, any))
+                or self._search_regex(r'snssdk\d*://live\?room_id=(\d+)', webpage, 'room ID', default=room_id))
             uploader = uploader or traverse_obj(
                 data, ('LiveRoom', 'liveRoomUserInfo', 'user', 'uniqueId'),
                 ('UserModule', 'users', ..., 'uniqueId'), get_all=False, expected_type=str)
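
The TikTok live hunk adds `LiveRoom.liveRoomUserInfo.user` as a room-ID source ahead of `UserModule.users`, and folds the old trailing `or room_id` fallback into the regex default. A plain-Python sketch of that lookup order, with an assumed SIGI_STATE shape based only on the keys named in the diff:

import re

def find_room_id(data, webpage, room_id=None):
    # Candidate roomId values, in the order the new traversal checks them
    candidates = [
        data.get('LiveRoom', {}).get('liveRoomUserInfo', {}).get('user', {}).get('roomId'),
        *(user.get('roomId') for user in data.get('UserModule', {}).get('users', {}).values()),
    ]
    for value in candidates:
        if isinstance(value, str) and value:
            return value
    # Fall back to the deep link in the page, then to the previously known room_id
    mobj = re.search(r'snssdk\d*://live\?room_id=(\d+)', webpage)
    return mobj.group(1) if mobj else room_id
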
@@ -96,7 +96,7 @@ class TV5MondePlusIE(InfoExtractor):

     def _real_extract(self, url):
         display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
+        webpage = self._download_webpage(url, display_id, impersonate=True)

         if ">Ce programme n'est malheureusement pas disponible pour votre zone géographique.<" in webpage:
             self.raise_geo_restricted(countries=['FR'])
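
The TV5MondePlus fix only adds `impersonate=True`, which routes the page request through yt-dlp's browser-impersonating HTTP client. Outside of yt-dlp, roughly the same effect can be sketched with the third-party curl_cffi package (the target name 'chrome' and the URL are illustrative assumptions):

# Requires: pip install curl_cffi
from curl_cffi import requests

# Impersonating a real browser's TLS/HTTP fingerprint is what gets past the
# fingerprint-based blocking that plain urllib/requests clients run into.
resp = requests.get('https://www.tv5mondeplus.com/', impersonate='chrome')
print(resp.status_code, len(resp.text))
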
@@ -3159,7 +3159,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

     def _extract_n_function_name(self, jscode):
         funcname, idx = self._search_regex(
-            r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)',
+            r'''(?x)(?:\.get\("n"\)\)&&\(b=|b=String\.fromCharCode\(110\),c=a\.get\(b\)\)&&\(c=)
+                (?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)''',
             jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
         if not idx:
             return funcname
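
The new `_extract_n_function_name` pattern adds a second alternation for players that look the n parameter up via `String.fromCharCode(110)` instead of a literal `"n"`. A quick check of both branches against synthetic snippets shaped like the two player styles (these strings are made up for illustration, not real player code):

import re

NFUNC_RE = r'''(?x)(?:\.get\("n"\)\)&&\(b=|b=String\.fromCharCode\(110\),c=a\.get\(b\)\)&&\(c=)
    (?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)'''

old_style = 'a.get("n"))&&(b=Xyz[0](c)'                              # literal "n" lookup
new_style = 'b=String.fromCharCode(110),c=a.get(b))&&(c=Abc[1](d)'   # fromCharCode(110) lookup

for js in (old_style, new_style):
    mobj = re.search(NFUNC_RE, js)
    print(mobj.group('nfunc'), mobj.group('idx'))  # -> Xyz 0, then Abc 1
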
@@ -3170,7 +3171,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

     def _extract_n_function_code(self, video_id, player_url):
         player_id = self._extract_player_info(player_url)
-        func_code = self.cache.load('youtube-nsig', player_id, min_ver='2022.09.1')
+        func_code = self.cache.load('youtube-nsig', player_id, min_ver='2024.07.09')
         jscode = func_code or self._load_player(video_id, player_url)
         jsi = JSInterpreter(jscode)

@@ -3179,17 +3180,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

         func_name = self._extract_n_function_name(jscode)

-        # For redundancy
-        func_code = self._search_regex(
-            rf'''(?xs){func_name}\s*=\s*function\s*\((?P<var>[\w$]+)\)\s*
-            # NB: The end of the regex is intentionally kept strict
-            {{(?P<code>.+?}}\s*return\ [\w$]+.join\(""\))}};''',
-            jscode, 'nsig function', group=('var', 'code'), default=None)
-        if func_code:
-            func_code = ([func_code[0]], func_code[1])
-        else:
-            self.write_debug('Extracting nsig function with jsinterp')
-            func_code = jsi.extract_function_code(func_name)
+        func_code = jsi.extract_function_code(func_name)

         self.cache.store('youtube-nsig', player_id, func_code)
         return jsi, player_id, func_code
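
The last two hunks drop the regex-based nsig extraction in favour of `jsi.extract_function_code` alone and bump the cache's `min_ver` to 2024.07.09, so entries produced by the old path are treated as stale and re-extracted. A rough sketch of the `min_ver` idea (the cache layout shown is an assumption for illustration, not yt-dlp's actual on-disk format):

def cache_load(store, section, key, min_ver=None):
    # store: dict mapping (section, key) -> {'version': '<writer version>', 'data': ...}
    entry = store.get((section, key))
    if not entry:
        return None

    def as_tuple(ver):
        return tuple(int(part) for part in ver.split('.'))

    # Entries written by versions older than min_ver count as a cache miss,
    # which forces the nsig function code to be re-extracted from the player JS.
    if min_ver and as_tuple(entry['version']) < as_tuple(min_ver):
        return None
    return entry['data']

store = {('youtube-nsig', 'player123'): {'version': '2022.09.01', 'data': '...'}}
print(cache_load(store, 'youtube-nsig', 'player123', min_ver='2024.07.09'))  # None -> re-extract
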