Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-12-26 17:41:46 +00:00)
Merge remote-tracking branch 'upstream/master' into boomplay
@@ -154,7 +154,6 @@ from .utils import (
try_get,
url_basename,
variadic,
version_tuple,
windows_enable_vt_mode,
write_json_file,
write_string,
@@ -251,7 +250,7 @@ class YoutubeDL:
format_sort_force: Force the given format_sort. see "Sorting Formats"
for more details.
prefer_free_formats: Whether to prefer video formats with free containers
over non-free ones of same quality.
over non-free ones of the same quality.
allow_multiple_video_streams: Allow multiple video streams to be merged
into a single file
allow_multiple_audio_streams: Allow multiple audio streams to be merged
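As an aside, these format-preference options are plain keys in the params dict when yt-dlp is embedded; a minimal sketch (the URL is yt-dlp's usual test video and the sort keys are only illustrative):

    import yt_dlp

    ydl_opts = {
        'prefer_free_formats': True,         # prefer free containers over non-free ones of the same quality
        'format_sort': ['res:1080', 'fps'],  # see "Sorting Formats" for the available keys
        'format_sort_force': True,           # force the given format_sort
        'allow_multiple_audio_streams': False,
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])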
@@ -285,7 +284,7 @@ class YoutubeDL:
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Print everything to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
consoletitle: Display progress in the console window's titlebar.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
clean_infojson: Remove internal metadata from the infojson
@@ -513,7 +512,7 @@ class YoutubeDL:
The following options are used by the extractors:
extractor_retries: Number of times to retry for known errors (default: 3)
dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
hls_split_discontinuity: Split HLS playlists to different formats at
hls_split_discontinuity: Split HLS playlists into different formats at
discontinuities such as ad breaks (default: False)
extractor_args: A dictionary of arguments to be passed to the extractors.
See "EXTRACTOR ARGUMENTS" for details.
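Similarly, extractor_args is a mapping of extractor key to argument lists, mirroring the --extractor-args CLI syntax; a hedged sketch (the chosen arguments are only examples):

    import yt_dlp

    ydl_opts = {
        'extractor_retries': 5,  # default is 3
        'extractor_args': {
            'youtube': {  # keyed by extractor, then argument name -> list of values
                'player_client': ['web', 'ios'],
                'lang': ['en'],
            },
        },
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)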
@@ -553,7 +552,7 @@ class YoutubeDL:
include_ads: - Doesn't work
Download ads as well
call_home: - Not implemented
Boolean, true iff we are allowed to contact the
Boolean, true if we are allowed to contact the
yt-dlp servers for debugging.
post_hooks: - Register a custom postprocessor
A list of functions that get called as the final step
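post_hooks is the one option in this group that is fully functional: each hook receives the final filename once a file has finished downloading and post-processing. A tiny sketch (the hook name is illustrative):

    import yt_dlp

    def notify_done(filepath):
        # called once per video file, after downloading and post-processing have finished
        print(f'finished: {filepath}')

    with yt_dlp.YoutubeDL({'post_hooks': [notify_done]}) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])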
@@ -4089,17 +4088,6 @@ class YoutubeDL:
if plugin_dirs:
write_debug(f'Plugin directories: {plugin_dirs}')

# Not implemented
if False and self.params.get('call_home'):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
write_debug(f'Public IP address: {ipaddr}')
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode()
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
f'You are using an outdated version (newest version: {latest_version})! '
'See https://yt-dl.org/update if you need help updating.')

@functools.cached_property
def proxies(self):
"""Global proxy configuration"""
@@ -34,6 +34,7 @@ from .postprocessor import (
)
from .update import Updater
from .utils import (
Config,
NO_DEFAULT,
POSTPROCESS_WHEN,
DateRange,
@@ -967,6 +968,10 @@ def _real_main(argv=None):

parser, opts, all_urls, ydl_opts = parse_options(argv)

# HACK: Set the plugin dirs early on
# TODO(coletdjnz): remove when plugin globals system is implemented
Config._plugin_dirs = opts.plugin_dirs

# Dump user agent
if opts.dump_user_agent:
ua = traverse_obj(opts.headers, 'User-Agent', casesense=False, default=std_headers['User-Agent'])
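The user-agent dump relies on traverse_obj's case-insensitive key matching; a small illustration with made-up header values:

    from yt_dlp.utils import traverse_obj

    headers = {'user-agent': 'curl/8.0'}  # placeholder value
    # casesense=False lets the 'User-Agent' key match regardless of capitalisation
    ua = traverse_obj(headers, 'User-Agent', casesense=False, default='fallback UA')
    print(ua)  # -> curl/8.0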
@@ -373,7 +373,10 @@ from .ccc import (
)
from .ccma import CCMAIE
from .cctv import CCTVIE
from .cda import CDAIE
from .cda import (
CDAIE,
CDAFolderIE,
)
from .cellebrite import CellebriteIE
from .ceskatelevize import CeskaTelevizeIE
from .cgtn import CGTNIE
@@ -1355,6 +1355,7 @@ MSO_INFO = {
class AdobePassIE(InfoExtractor): # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
_SERVICE_PROVIDER_TEMPLATE = 'https://sp.auth.adobe.com/adobe-services/%s'
_USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:47.0) Gecko/20100101 Firefox/47.0'
_MODERN_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; rv:131.0) Gecko/20100101 Firefox/131.0'
_MVPD_CACHE = 'ap-mvpd'

_DOWNLOADING_LOGIN_PAGE = 'Downloading Provider Login Page'
@@ -1454,7 +1455,11 @@ class AdobePassIE(InfoExtractor): # XXX: Conventionally, base classes should en
'no_iframe': 'false',
'domain_name': 'adobe.com',
'redirect_url': url,
})
}, headers={
# yt-dlp's default user-agent is usually too old for Comcast_SSO
# See: https://github.com/yt-dlp/yt-dlp/issues/10848
'User-Agent': self._MODERN_USER_AGENT,
} if mso_id == 'Comcast_SSO' else None)
elif not self._cookies_passed:
raise_mvpd_required()
@@ -33,21 +33,21 @@ class AfreecaTVBaseIE(InfoExtractor):
}

response = self._download_json(
'https://login.afreecatv.com/app/LoginAction.php', None,
'https://login.sooplive.co.kr/app/LoginAction.php', None,
'Logging in', data=urlencode_postdata(login_form))

_ERRORS = {
-4: 'Your account has been suspended due to a violation of our terms and policies.',
-5: 'https://member.afreecatv.com/app/user_delete_progress.php',
-6: 'https://login.afreecatv.com/membership/changeMember.php',
-8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
-9: 'https://member.afreecatv.com/app/pop_login_block.php',
-11: 'https://login.afreecatv.com/afreeca/second_login.php',
-12: 'https://member.afreecatv.com/app/user_security.php',
-5: 'https://member.sooplive.co.kr/app/user_delete_progress.php',
-6: 'https://login.sooplive.co.kr/membership/changeMember.php',
-8: "Hello! Soop here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
-9: 'https://member.sooplive.co.kr/app/pop_login_block.php',
-11: 'https://login.sooplive.co.kr/afreeca/second_login.php',
-12: 'https://member.sooplive.co.kr/app/user_security.php',
0: 'The username does not exist or you have entered the wrong password.',
-1: 'The username does not exist or you have entered the wrong password.',
-3: 'You have entered your username/password incorrectly.',
-7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.',
-7: 'You cannot use your Global Soop account to access Korean Soop.',
-10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. \nPlease contact our Help Center for assistance.',
-32008: 'You have failed to log in. Please contact our Help Center.',
}
@@ -61,76 +61,40 @@ class AfreecaTVBaseIE(InfoExtractor):
|
||||
|
||||
def _call_api(self, endpoint, display_id, data=None, headers=None, query=None):
|
||||
return self._download_json(Request(
|
||||
f'https://api.m.afreecatv.com/{endpoint}',
|
||||
f'https://api.m.sooplive.co.kr/{endpoint}',
|
||||
data=data, headers=headers, query=query,
|
||||
extensions={'legacy_ssl': True}), display_id,
|
||||
'Downloading API JSON', 'Unable to download API JSON')
|
||||
|
||||
|
||||
class AfreecaTVIE(AfreecaTVBaseIE):
|
||||
IE_NAME = 'afreecatv'
|
||||
IE_DESC = 'afreecatv.com'
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
(?:
|
||||
(?:(?:live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)?
|
||||
(?:
|
||||
/app/(?:index|read_ucc_bbs)\.cgi|
|
||||
/player/[Pp]layer\.(?:swf|html)
|
||||
)\?.*?\bnTitleNo=|
|
||||
vod\.afreecatv\.com/(PLAYER/STATION|player)/
|
||||
)
|
||||
(?P<id>\d+)/?(?:$|[?#&])
|
||||
'''
|
||||
IE_NAME = 'soop'
|
||||
IE_DESC = 'sooplive.co.kr'
|
||||
_VALID_URL = r'https?://vod\.(?:sooplive\.co\.kr|afreecatv\.com)/(?:PLAYER/STATION|player)/(?P<id>\d+)/?(?:$|[?#&])'
|
||||
_TESTS = [{
|
||||
'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
|
||||
'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
|
||||
'url': 'https://vod.sooplive.co.kr/player/96753363',
|
||||
'info_dict': {
|
||||
'id': '36164052',
|
||||
'id': '20230108_9FF5BEE1_244432674_1',
|
||||
'ext': 'mp4',
|
||||
'title': '데일리 에이프릴 요정들의 시상식!',
|
||||
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
|
||||
'uploader': 'dailyapril',
|
||||
'uploader_id': 'dailyapril',
|
||||
'upload_date': '20160503',
|
||||
'uploader_id': 'rlantnghks',
|
||||
'uploader': '페이즈으',
|
||||
'duration': 10840,
|
||||
'thumbnail': r're:https?://videoimg\.sooplive\.co\.kr/.+',
|
||||
'upload_date': '20230108',
|
||||
'timestamp': 1673218805,
|
||||
'title': '젠지 페이즈',
|
||||
},
|
||||
'skip': 'Video is gone',
|
||||
}, {
|
||||
'url': 'http://afbbs.afreecatv.com:8080/app/read_ucc_bbs.cgi?nStationNo=16711924&nTitleNo=36153164&szBjId=dailyapril&nBbsNo=18605867',
|
||||
'info_dict': {
|
||||
'id': '36153164',
|
||||
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
|
||||
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
|
||||
'uploader': 'dailyapril',
|
||||
'uploader_id': 'dailyapril',
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'playlist_count': 2,
|
||||
'playlist': [{
|
||||
'md5': 'd8b7c174568da61d774ef0203159bf97',
|
||||
'info_dict': {
|
||||
'id': '36153164_1',
|
||||
'ext': 'mp4',
|
||||
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
|
||||
'upload_date': '20160502',
|
||||
},
|
||||
}, {
|
||||
'md5': '58f2ce7f6044e34439ab2d50612ab02b',
|
||||
'info_dict': {
|
||||
'id': '36153164_2',
|
||||
'ext': 'mp4',
|
||||
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
|
||||
'upload_date': '20160502',
|
||||
},
|
||||
}],
|
||||
'skip': 'Video is gone',
|
||||
}, {
|
||||
# non standard key
|
||||
'url': 'http://vod.afreecatv.com/PLAYER/STATION/20515605',
|
||||
'url': 'http://vod.sooplive.co.kr/PLAYER/STATION/20515605',
|
||||
'info_dict': {
|
||||
'id': '20170411_BE689A0E_190960999_1_2_h',
|
||||
'ext': 'mp4',
|
||||
'title': '혼자사는여자집',
|
||||
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
|
||||
'thumbnail': r're:https?://(?:video|st)img\.sooplive\.co\.kr/.+',
|
||||
'uploader': '♥이슬이',
|
||||
'uploader_id': 'dasl8121',
|
||||
'upload_date': '20170411',
|
||||
@@ -142,12 +106,12 @@ class AfreecaTVIE(AfreecaTVBaseIE):
|
||||
},
|
||||
}, {
|
||||
# adult content
|
||||
'url': 'https://vod.afreecatv.com/player/97267690',
|
||||
'url': 'https://vod.sooplive.co.kr/player/97267690',
|
||||
'info_dict': {
|
||||
'id': '20180327_27901457_202289533_1',
|
||||
'ext': 'mp4',
|
||||
'title': '[생]빨개요♥ (part 1)',
|
||||
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
|
||||
'thumbnail': r're:https?://(?:video|st)img\.sooplive\.co\.kr/.+',
|
||||
'uploader': '[SA]서아',
|
||||
'uploader_id': 'bjdyrksu',
|
||||
'upload_date': '20180327',
|
||||
@@ -157,36 +121,17 @@ class AfreecaTVIE(AfreecaTVBaseIE):
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'The VOD does not exist',
|
||||
}, {
|
||||
'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://vod.afreecatv.com/player/96753363',
|
||||
'info_dict': {
|
||||
'id': '20230108_9FF5BEE1_244432674_1',
|
||||
'ext': 'mp4',
|
||||
'uploader_id': 'rlantnghks',
|
||||
'uploader': '페이즈으',
|
||||
'duration': 10840,
|
||||
'thumbnail': r're:https?://videoimg\.afreecatv\.com/.+',
|
||||
'upload_date': '20230108',
|
||||
'timestamp': 1673218805,
|
||||
'title': '젠지 페이즈',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
# adult content
|
||||
'url': 'https://vod.afreecatv.com/player/70395877',
|
||||
'url': 'https://vod.sooplive.co.kr/player/70395877',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# subscribers only
|
||||
'url': 'https://vod.afreecatv.com/player/104647403',
|
||||
'url': 'https://vod.sooplive.co.kr/player/104647403',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# private
|
||||
'url': 'https://vod.afreecatv.com/player/81669846',
|
||||
'url': 'https://vod.sooplive.co.kr/player/81669846',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@@ -262,11 +207,11 @@ class AfreecaTVIE(AfreecaTVBaseIE):
|
||||
|
||||
|
||||
class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
|
||||
IE_NAME = 'afreecatv:catchstory'
|
||||
IE_DESC = 'afreecatv.com catch story'
|
||||
_VALID_URL = r'https?://vod\.afreecatv\.com/player/(?P<id>\d+)/catchstory'
|
||||
IE_NAME = 'soop:catchstory'
|
||||
IE_DESC = 'sooplive.co.kr catch story'
|
||||
_VALID_URL = r'https?://vod\.(?:sooplive\.co\.kr|afreecatv\.com)/player/(?P<id>\d+)/catchstory'
|
||||
_TESTS = [{
|
||||
'url': 'https://vod.afreecatv.com/player/103247/catchstory',
|
||||
'url': 'https://vod.sooplive.co.kr/player/103247/catchstory',
|
||||
'info_dict': {
|
||||
'id': '103247',
|
||||
},
|
||||
@@ -299,11 +244,11 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
|
||||
|
||||
|
||||
class AfreecaTVLiveIE(AfreecaTVBaseIE):
|
||||
IE_NAME = 'afreecatv:live'
|
||||
IE_DESC = 'afreecatv.com livestreams'
|
||||
_VALID_URL = r'https?://play\.afreeca(?:tv)?\.com/(?P<id>[^/]+)(?:/(?P<bno>\d+))?'
|
||||
IE_NAME = 'soop:live'
|
||||
IE_DESC = 'sooplive.co.kr livestreams'
|
||||
_VALID_URL = r'https?://play\.(?:sooplive\.co\.kr|afreecatv\.com)/(?P<id>[^/?#]+)(?:/(?P<bno>\d+))?'
|
||||
_TESTS = [{
|
||||
'url': 'https://play.afreecatv.com/pyh3646/237852185',
|
||||
'url': 'https://play.sooplive.co.kr/pyh3646/237852185',
|
||||
'info_dict': {
|
||||
'id': '237852185',
|
||||
'ext': 'mp4',
|
||||
@@ -315,30 +260,30 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
|
||||
},
|
||||
'skip': 'Livestream has ended',
|
||||
}, {
|
||||
'url': 'https://play.afreecatv.com/pyh3646/237852185',
|
||||
'url': 'https://play.sooplive.co.kr/pyh3646/237852185',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://play.afreecatv.com/pyh3646',
|
||||
'url': 'https://play.sooplive.co.kr/pyh3646',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_LIVE_API_URL = 'https://live.afreecatv.com/afreeca/player_live_api.php'
|
||||
_LIVE_API_URL = 'https://live.sooplive.co.kr/afreeca/player_live_api.php'
|
||||
_WORKING_CDNS = [
|
||||
'gcp_cdn', # live-global-cdn-v02.afreecatv.com
|
||||
'gs_cdn_pc_app', # pc-app.stream.afreecatv.com
|
||||
'gs_cdn_mobile_web', # mobile-web.stream.afreecatv.com
|
||||
'gs_cdn_pc_web', # pc-web.stream.afreecatv.com
|
||||
'gcp_cdn', # live-global-cdn-v02.sooplive.co.kr
|
||||
'gs_cdn_pc_app', # pc-app.stream.sooplive.co.kr
|
||||
'gs_cdn_mobile_web', # mobile-web.stream.sooplive.co.kr
|
||||
'gs_cdn_pc_web', # pc-web.stream.sooplive.co.kr
|
||||
]
|
||||
_BAD_CDNS = [
|
||||
'gs_cdn', # chromecast.afreeca.gscdn.com (cannot resolve)
|
||||
'gs_cdn_chromecast', # chromecast.stream.afreecatv.com (HTTP Error 400)
|
||||
'azure_cdn', # live-global-cdn-v01.afreecatv.com (cannot resolve)
|
||||
'aws_cf', # live-global-cdn-v03.afreecatv.com (cannot resolve)
|
||||
'kt_cdn', # kt.stream.afreecatv.com (HTTP Error 400)
|
||||
'gs_cdn_chromecast', # chromecast.stream.sooplive.co.kr (HTTP Error 400)
|
||||
'azure_cdn', # live-global-cdn-v01.sooplive.co.kr (cannot resolve)
|
||||
'aws_cf', # live-global-cdn-v03.sooplive.co.kr (cannot resolve)
|
||||
'kt_cdn', # kt.stream.sooplive.co.kr (HTTP Error 400)
|
||||
]
|
||||
|
||||
def _extract_formats(self, channel_info, broadcast_no, aid):
stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.sooplive.co.kr'

# If user has not passed CDN IDs, try API-provided CDN ID followed by other working CDN IDs
default_cdn_ids = orderedSet([
@@ -358,7 +303,7 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
try:
return self._extract_m3u8_formats(
m3u8_url, broadcast_no, 'mp4', m3u8_id='hls', query={'aid': aid},
headers={'Referer': 'https://play.afreecatv.com/'})
headers={'Referer': 'https://play.sooplive.co.kr/'})
except ExtractorError as e:
if attempt == len(cdn_ids):
raise
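In other words, the API-provided CDN is tried first, then the remaining known-good CDNs, and the failure is only propagated once every candidate has been exhausted. A rough sketch of that ordering and fallback (names follow the code above; the fetch callable stands in for the real m3u8 request):

    from yt_dlp.utils import orderedSet

    _WORKING_CDNS = ['gcp_cdn', 'gs_cdn_pc_app', 'gs_cdn_mobile_web', 'gs_cdn_pc_web']

    def cdn_order(api_cdn, user_cdns=None):
        # user-supplied CDN IDs win; otherwise the API value goes first, duplicates removed
        return user_cdns or orderedSet([api_cdn, *_WORKING_CDNS])

    def extract_with_fallback(cdn_ids, fetch):
        for attempt, cdn_id in enumerate(cdn_ids, start=1):
            try:
                return fetch(cdn_id)
            except Exception:
                if attempt == len(cdn_ids):  # every candidate failed -> re-raise
                    raise

    print(cdn_order('gs_cdn_pc_web'))  # -> ['gs_cdn_pc_web', 'gcp_cdn', 'gs_cdn_pc_app', 'gs_cdn_mobile_web']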
@@ -374,7 +319,13 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
|
||||
broadcaster_id = channel_info.get('BJID') or broadcaster_id
|
||||
broadcast_no = channel_info.get('BNO') or broadcast_no
|
||||
if not broadcast_no:
|
||||
raise UserNotLive(video_id=broadcaster_id)
|
||||
result = channel_info.get('RESULT')
|
||||
if result == 0:
|
||||
raise UserNotLive(video_id=broadcaster_id)
|
||||
elif result == -6:
|
||||
self.raise_login_required(
|
||||
'This channel is streaming for subscribers only', method='password')
|
||||
raise ExtractorError('Unable to extract broadcast number')
|
||||
|
||||
password = self.get_param('videopassword')
|
||||
if channel_info.get('BPWD') == 'Y' and password is None:
|
||||
@@ -403,7 +354,7 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
|
||||
formats = self._extract_formats(channel_info, broadcast_no, aid)
|
||||
|
||||
station_info = traverse_obj(self._download_json(
|
||||
'https://st.afreecatv.com/api/get_station_status.php', broadcast_no,
|
||||
'https://st.sooplive.co.kr/api/get_station_status.php', broadcast_no,
|
||||
'Downloading channel metadata', 'Unable to download channel metadata',
|
||||
query={'szBjId': broadcaster_id}, fatal=False), {dict}) or {}
|
||||
|
||||
@@ -419,11 +370,11 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
|
||||
}
|
||||
|
||||
|
||||
class AfreecaTVUserIE(InfoExtractor):
|
||||
IE_NAME = 'afreecatv:user'
|
||||
_VALID_URL = r'https?://bj\.afreeca(?:tv)?\.com/(?P<id>[^/]+)/vods/?(?P<slug_type>[^/]+)?'
|
||||
class AfreecaTVUserIE(AfreecaTVBaseIE):
|
||||
IE_NAME = 'soop:user'
|
||||
_VALID_URL = r'https?://ch\.(?:sooplive\.co\.kr|afreecatv\.com)/(?P<id>[^/?#]+)/vods/?(?P<slug_type>[^/?#]+)?'
|
||||
_TESTS = [{
|
||||
'url': 'https://bj.afreecatv.com/ryuryu24/vods/review',
|
||||
'url': 'https://ch.sooplive.co.kr/ryuryu24/vods/review',
|
||||
'info_dict': {
|
||||
'_type': 'playlist',
|
||||
'id': 'ryuryu24',
|
||||
@@ -431,7 +382,7 @@ class AfreecaTVUserIE(InfoExtractor):
|
||||
},
|
||||
'playlist_count': 218,
|
||||
}, {
|
||||
'url': 'https://bj.afreecatv.com/parang1995/vods/highlight',
|
||||
'url': 'https://ch.sooplive.co.kr/parang1995/vods/highlight',
|
||||
'info_dict': {
|
||||
'_type': 'playlist',
|
||||
'id': 'parang1995',
|
||||
@@ -439,7 +390,7 @@ class AfreecaTVUserIE(InfoExtractor):
|
||||
},
|
||||
'playlist_count': 997,
|
||||
}, {
|
||||
'url': 'https://bj.afreecatv.com/ryuryu24/vods',
|
||||
'url': 'https://ch.sooplive.co.kr/ryuryu24/vods',
|
||||
'info_dict': {
|
||||
'_type': 'playlist',
|
||||
'id': 'ryuryu24',
|
||||
@@ -447,7 +398,7 @@ class AfreecaTVUserIE(InfoExtractor):
|
||||
},
|
||||
'playlist_count': 221,
|
||||
}, {
|
||||
'url': 'https://bj.afreecatv.com/ryuryu24/vods/balloonclip',
|
||||
'url': 'https://ch.sooplive.co.kr/ryuryu24/vods/balloonclip',
|
||||
'info_dict': {
|
||||
'_type': 'playlist',
|
||||
'id': 'ryuryu24',
|
||||
@@ -459,12 +410,12 @@ class AfreecaTVUserIE(InfoExtractor):
|
||||
|
||||
def _fetch_page(self, user_id, user_type, page):
|
||||
page += 1
|
||||
info = self._download_json(f'https://bjapi.afreecatv.com/api/{user_id}/vods/{user_type}', user_id,
|
||||
info = self._download_json(f'https://chapi.sooplive.co.kr/api/{user_id}/vods/{user_type}', user_id,
|
||||
query={'page': page, 'per_page': self._PER_PAGE, 'orderby': 'reg_date'},
|
||||
note=f'Downloading {user_type} video page {page}')
|
||||
for item in info['data']:
|
||||
yield self.url_result(
|
||||
f'https://vod.afreecatv.com/player/{item["title_no"]}/', AfreecaTVIE, item['title_no'])
|
||||
f'https://vod.sooplive.co.kr/player/{item["title_no"]}/', AfreecaTVIE, item['title_no'])
|
||||
|
||||
def _real_extract(self, url):
|
||||
user_id, user_type = self._match_valid_url(url).group('id', 'slug_type')
|
||||
|
||||
@@ -4,7 +4,6 @@ import json
|
||||
import re
|
||||
import time
|
||||
import urllib.parse
|
||||
import xml.etree.ElementTree
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..networking import HEADRequest
|
||||
@@ -12,7 +11,6 @@ from ..utils import (
|
||||
ExtractorError,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
join_nonempty,
|
||||
js_to_json,
|
||||
mimetype2ext,
|
||||
orderedSet,
|
||||
@@ -524,14 +522,13 @@ class CBCGemIE(InfoExtractor):
|
||||
_TESTS = [{
|
||||
# This is a normal, public, TV show video
|
||||
'url': 'https://gem.cbc.ca/media/schitts-creek/s06e01',
|
||||
'md5': '93dbb31c74a8e45b378cf13bd3f6f11e',
|
||||
'info_dict': {
|
||||
'id': 'schitts-creek/s06e01',
|
||||
'ext': 'mp4',
|
||||
'title': 'Smoke Signals',
|
||||
'description': 'md5:929868d20021c924020641769eb3e7f1',
|
||||
'thumbnail': 'https://images.radio-canada.ca/v1/synps-cbc/episode/perso/cbc_schitts_creek_season_06e01_thumbnail_v01.jpg?im=Resize=(Size)',
|
||||
'duration': 1314,
|
||||
'thumbnail': r're:https://images\.radio-canada\.ca/[^#?]+/cbc_schitts_creek_season_06e01_thumbnail_v01\.jpg',
|
||||
'duration': 1324,
|
||||
'categories': ['comedy'],
|
||||
'series': 'Schitt\'s Creek',
|
||||
'season': 'Season 6',
|
||||
@@ -539,19 +536,21 @@ class CBCGemIE(InfoExtractor):
|
||||
'episode': 'Smoke Signals',
|
||||
'episode_number': 1,
|
||||
'episode_id': 'schitts-creek/s06e01',
|
||||
'upload_date': '20210618',
|
||||
'timestamp': 1623988800,
|
||||
'release_date': '20200107',
|
||||
'release_timestamp': 1578427200,
|
||||
},
|
||||
'params': {'format': 'bv'},
|
||||
'skip': 'Geo-restricted to Canada',
|
||||
}, {
|
||||
# This video requires an account in the browser, but works fine in yt-dlp
|
||||
'url': 'https://gem.cbc.ca/media/schitts-creek/s01e01',
|
||||
'md5': '297a9600f554f2258aed01514226a697',
|
||||
'info_dict': {
|
||||
'id': 'schitts-creek/s01e01',
|
||||
'ext': 'mp4',
|
||||
'title': 'The Cup Runneth Over',
|
||||
'description': 'md5:9bca14ea49ab808097530eb05a29e797',
|
||||
'thumbnail': 'https://images.radio-canada.ca/v1/synps-cbc/episode/perso/cbc_schitts_creek_season_01e01_thumbnail_v01.jpg?im=Resize=(Size)',
|
||||
'thumbnail': r're:https://images\.radio-canada\.ca/[^#?]+/cbc_schitts_creek_season_01e01_thumbnail_v01\.jpg',
|
||||
'series': 'Schitt\'s Creek',
|
||||
'season_number': 1,
|
||||
'season': 'Season 1',
|
||||
@@ -560,9 +559,12 @@ class CBCGemIE(InfoExtractor):
|
||||
'episode_id': 'schitts-creek/s01e01',
|
||||
'duration': 1309,
|
||||
'categories': ['comedy'],
|
||||
'upload_date': '20210617',
|
||||
'timestamp': 1623902400,
|
||||
'release_date': '20151124',
|
||||
'release_timestamp': 1448323200,
|
||||
},
|
||||
'params': {'format': 'bv'},
|
||||
'skip': 'Geo-restricted to Canada',
|
||||
}, {
|
||||
'url': 'https://gem.cbc.ca/nadiyas-family-favourites/s01e01',
|
||||
'only_matching': True,
|
||||
@@ -631,38 +633,6 @@ class CBCGemIE(InfoExtractor):
|
||||
return
|
||||
self._claims_token = self.cache.load(self._NETRC_MACHINE, 'claims_token')
|
||||
|
||||
def _find_secret_formats(self, formats, video_id):
|
||||
""" Find a valid video url and convert it to the secret variant """
|
||||
base_format = next((f for f in formats if f.get('vcodec') != 'none'), None)
|
||||
if not base_format:
|
||||
return
|
||||
|
||||
base_url = re.sub(r'(Manifest\(.*?),filter=[\w-]+(.*?\))', r'\1\2', base_format['url'])
|
||||
url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)
|
||||
|
||||
secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
|
||||
if not isinstance(secret_xml, xml.etree.ElementTree.Element):
|
||||
return
|
||||
|
||||
for child in secret_xml:
|
||||
if child.attrib.get('Type') != 'video':
|
||||
continue
|
||||
for video_quality in child:
|
||||
bitrate = int_or_none(video_quality.attrib.get('Bitrate'))
|
||||
if not bitrate or 'Index' not in video_quality.attrib:
|
||||
continue
|
||||
height = int_or_none(video_quality.attrib.get('MaxHeight'))
|
||||
|
||||
yield {
|
||||
**base_format,
|
||||
'format_id': join_nonempty('sec', height),
|
||||
# Note: \g<1> is necessary instead of \1 since bitrate is a number
|
||||
'url': re.sub(r'(QualityLevels\()\d+(\))', fr'\g<1>{bitrate}\2', base_url),
|
||||
'width': int_or_none(video_quality.attrib.get('MaxWidth')),
|
||||
'tbr': bitrate / 1000.0,
|
||||
'height': height,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
video_info = self._download_json(
|
||||
@@ -676,7 +646,6 @@ class CBCGemIE(InfoExtractor):
|
||||
else:
|
||||
headers = {}
|
||||
m3u8_info = self._download_json(video_info['playSession']['url'], video_id, headers=headers)
|
||||
m3u8_url = m3u8_info.get('url')
|
||||
|
||||
if m3u8_info.get('errorCode') == 1:
|
||||
self.raise_geo_restricted(countries=['CA'])
|
||||
@@ -685,9 +654,9 @@ class CBCGemIE(InfoExtractor):
|
||||
elif m3u8_info.get('errorCode') != 0:
|
||||
raise ExtractorError(f'{self.IE_NAME} said: {m3u8_info.get("errorCode")} - {m3u8_info.get("message")}')
|
||||
|
||||
formats = self._extract_m3u8_formats(m3u8_url, video_id, m3u8_id='hls')
|
||||
formats = self._extract_m3u8_formats(
|
||||
m3u8_info['url'], video_id, 'mp4', m3u8_id='hls', query={'manifestType': ''})
|
||||
self._remove_duplicate_formats(formats)
|
||||
formats.extend(self._find_secret_formats(formats, video_id))
|
||||
|
||||
for fmt in formats:
|
||||
if fmt.get('vcodec') == 'none':
|
||||
@@ -703,20 +672,21 @@ class CBCGemIE(InfoExtractor):
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_info['title'],
|
||||
'description': video_info.get('description'),
|
||||
'thumbnail': video_info.get('image'),
|
||||
'series': video_info.get('series'),
|
||||
'season_number': video_info.get('season'),
|
||||
'season': f'Season {video_info.get("season")}',
|
||||
'episode_number': video_info.get('episode'),
|
||||
'episode': video_info.get('title'),
|
||||
'episode_id': video_id,
|
||||
'duration': video_info.get('duration'),
|
||||
'categories': [video_info.get('category')],
|
||||
'formats': formats,
|
||||
'release_timestamp': video_info.get('airDate'),
|
||||
'timestamp': video_info.get('availableDate'),
|
||||
**traverse_obj(video_info, {
|
||||
'title': ('title', {str}),
|
||||
'episode': ('title', {str}),
|
||||
'description': ('description', {str}),
|
||||
'thumbnail': ('image', {url_or_none}),
|
||||
'series': ('series', {str}),
|
||||
'season_number': ('season', {int_or_none}),
|
||||
'episode_number': ('episode', {int_or_none}),
|
||||
'duration': ('duration', {int_or_none}),
|
||||
'categories': ('category', {str}, all),
|
||||
'release_timestamp': ('airDate', {int_or_none(scale=1000)}),
|
||||
'timestamp': ('availableDate', {int_or_none(scale=1000)}),
|
||||
}),
|
||||
}
|
||||
|
||||
|
||||
|
||||
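The reworked metadata mapping converts the API's millisecond epoch fields with int_or_none(scale=1000); a quick illustration using the release timestamp from the first test case:

    from yt_dlp.utils import int_or_none

    print(int_or_none(1578427200000, scale=1000))  # -> 1578427200 (2020-01-07, the release_timestamp above)
    print(int_or_none(None, scale=1000))           # -> None, so missing fields simply stay unset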
@@ -12,6 +12,7 @@ from .common import InfoExtractor
|
||||
from ..compat import compat_ord
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
OnDemandPagedList,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
merge_dicts,
|
||||
@@ -351,3 +352,50 @@ class CDAIE(InfoExtractor):
|
||||
extract_format(webpage, resolution)
|
||||
|
||||
return merge_dicts(info_dict, info)
|
||||
|
||||
|
||||
class CDAFolderIE(InfoExtractor):
|
||||
_MAX_PAGE_SIZE = 36
|
||||
_VALID_URL = r'https?://(?:www\.)?cda\.pl/(?P<channel>\w+)/folder/(?P<id>\d+)'
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'https://www.cda.pl/domino264/folder/31188385',
|
||||
'info_dict': {
|
||||
'id': '31188385',
|
||||
'title': 'SERIA DRUGA',
|
||||
},
|
||||
'playlist_mincount': 13,
|
||||
},
|
||||
{
|
||||
'url': 'https://www.cda.pl/smiechawaTV/folder/2664592/vfilm',
|
||||
'info_dict': {
|
||||
'id': '2664592',
|
||||
'title': 'VideoDowcipy - wszystkie odcinki',
|
||||
},
|
||||
'playlist_mincount': 71,
|
||||
},
|
||||
{
|
||||
'url': 'https://www.cda.pl/DeliciousBeauty/folder/19129979/vfilm',
|
||||
'info_dict': {
|
||||
'id': '19129979',
|
||||
'title': 'TESTY KOSMETYKÓW',
|
||||
},
|
||||
'playlist_mincount': 139,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
folder_id, channel = self._match_valid_url(url).group('id', 'channel')
|
||||
|
||||
webpage = self._download_webpage(url, folder_id)
|
||||
|
||||
def extract_page_entries(page):
|
||||
webpage = self._download_webpage(
|
||||
f'https://www.cda.pl/{channel}/folder/{folder_id}/vfilm/{page + 1}', folder_id,
|
||||
f'Downloading page {page + 1}', expected_status=404)
|
||||
items = re.findall(r'<a[^>]+href="/video/([0-9a-z]+)"', webpage)
|
||||
for video_id in items:
|
||||
yield self.url_result(f'https://www.cda.pl/video/{video_id}', CDAIE, video_id)
|
||||
|
||||
return self.playlist_result(
|
||||
OnDemandPagedList(extract_page_entries, self._MAX_PAGE_SIZE),
|
||||
folder_id, self._og_search_title(webpage))
|
||||
|
||||
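The folder extractor pages through results with OnDemandPagedList, which only downloads a page when an entry from it is actually requested. A minimal hedged sketch with fabricated page data:

    from yt_dlp.utils import OnDemandPagedList

    PAGE_SIZE = 36

    def fetch_page(page):
        # page is 0-based; the real extractor downloads .../vfilm/{page + 1} here
        start = page * PAGE_SIZE
        for n in range(start, min(start + PAGE_SIZE, 100)):  # pretend the folder holds 100 videos
            yield f'video-{n}'

    entries = OnDemandPagedList(fetch_page, PAGE_SIZE)
    print(entries[0], entries[40])  # only the pages containing these indices are fetched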
@@ -333,7 +333,7 @@ class InfoExtractor:
|
||||
like_count: Number of positive ratings of the video
|
||||
dislike_count: Number of negative ratings of the video
|
||||
repost_count: Number of reposts of the video
|
||||
average_rating: Average rating give by users, the scale used depends on the webpage
|
||||
average_rating: Average rating given by users, the scale used depends on the webpage
|
||||
comment_count: Number of comments on the video
|
||||
comments: A list of comments, each with one or more of the following
|
||||
properties (all but one of text or html optional):
|
||||
@@ -520,7 +520,7 @@ class InfoExtractor:
|
||||
or _extract_from_webpage as necessary. While these are normally classmethods,
|
||||
_extract_from_webpage is allowed to be an instance method.
|
||||
|
||||
_extract_from_webpage may raise self.StopExtraction() to stop further
|
||||
_extract_from_webpage may raise self.StopExtraction to stop further
|
||||
processing of the webpage and obtain exclusive rights to it. This is useful
|
||||
when the extractor cannot reliably be matched using just the URL,
|
||||
e.g. invidious/peertube instances
|
||||
|
||||
@@ -3,7 +3,7 @@ from .nexx import NexxIE
|
||||
|
||||
|
||||
class FunkIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.|origin\.)?funk\.net/(?:channel|playlist)/[^/]+/(?P<display_id>[0-9a-z-]+)-(?P<id>\d+)'
|
||||
_VALID_URL = r'https?://(?:(?:www|origin|play)\.)?funk\.net/(?:channel|playlist)/[^/?#]+/(?P<display_id>[0-9a-z-]+)-(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.funk.net/channel/ba-793/die-lustigsten-instrumente-aus-dem-internet-teil-2-1155821',
|
||||
'md5': '8610449476156f338761a75391b0017d',
|
||||
@@ -27,6 +27,9 @@ class FunkIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://www.funk.net/playlist/neuesteVideos/kameras-auf-dem-fusion-festival-1618699',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://play.funk.net/playlist/neuesteVideos/george-floyd-wenn-die-polizei-toetet-der-fall-2004391',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -37,7 +37,7 @@ class ImgurBaseIE(InfoExtractor):
|
||||
|
||||
|
||||
class ImgurIE(ImgurBaseIE):
|
||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)(?P<id>[a-zA-Z0-9]+)'
|
||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://imgur.com/A61SaA1',
|
||||
@@ -54,6 +54,22 @@ class ImgurIE(ImgurBaseIE):
|
||||
'like_count': int,
|
||||
'thumbnail': 'https://i.imgur.com/A61SaA1h.jpg',
|
||||
},
|
||||
}, {
|
||||
# Test with URL slug
|
||||
'url': 'https://imgur.com/mrw-gifv-is-up-running-without-any-bugs-A61SaA1',
|
||||
'info_dict': {
|
||||
'id': 'A61SaA1',
|
||||
'ext': 'mp4',
|
||||
'title': 'MRW gifv is up and running without any bugs',
|
||||
'timestamp': 1416446068,
|
||||
'upload_date': '20141120',
|
||||
'dislike_count': int,
|
||||
'comment_count': int,
|
||||
'release_timestamp': 1416446068,
|
||||
'release_date': '20141120',
|
||||
'like_count': int,
|
||||
'thumbnail': 'https://i.imgur.com/A61SaA1h.jpg',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://i.imgur.com/A61SaA1.gifv',
|
||||
'only_matching': True,
|
||||
@@ -92,6 +108,7 @@ class ImgurIE(ImgurBaseIE):
|
||||
'comment_count': int,
|
||||
'release_timestamp': 1710491255,
|
||||
'release_date': '20240315',
|
||||
'thumbnail': 'https://i.imgur.com/zV03bd5h.jpg',
|
||||
},
|
||||
}]
|
||||
|
||||
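The loosened pattern above, (?:[^/?#]+-)? in front of the ID group, is what lets both the bare and the slugged URLs in the new test resolve to the same ID; checking the regex in isolation:

    import re

    # Same pattern as the updated ImgurIE._VALID_URL
    VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)'

    for url in ('https://imgur.com/A61SaA1',
                'https://imgur.com/mrw-gifv-is-up-running-without-any-bugs-A61SaA1'):
        print(re.match(VALID_URL, url).group('id'))  # -> A61SaA1 both times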
@@ -208,7 +225,10 @@ class ImgurIE(ImgurBaseIE):
|
||||
}), get_all=False),
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'thumbnail': url_or_none(search('thumbnailUrl')),
|
||||
'thumbnails': [{
|
||||
'url': thumbnail_url,
|
||||
'http_headers': {'Accept': '*/*'},
|
||||
}] if (thumbnail_url := search(['thumbnailUrl', 'twitter:image', 'og:image'])) else None,
|
||||
'http_headers': {'Accept': '*/*'},
|
||||
}
|
||||
|
||||
@@ -252,17 +272,9 @@ class ImgurGalleryBaseIE(ImgurBaseIE):
|
||||
|
||||
class ImgurGalleryIE(ImgurGalleryBaseIE):
|
||||
IE_NAME = 'imgur:gallery'
|
||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/?#]+)/(?P<id>[a-zA-Z0-9]+)'
|
||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/?#]+)/(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://imgur.com/gallery/Q95ko',
|
||||
'info_dict': {
|
||||
'id': 'Q95ko',
|
||||
'title': 'Adding faces make every GIF better',
|
||||
},
|
||||
'playlist_count': 25,
|
||||
'skip': 'Zoinks! You\'ve taken a wrong turn.',
|
||||
}, {
|
||||
# TODO: static images - replace with animated/video gallery
|
||||
'url': 'http://imgur.com/topic/Aww/ll5Vk',
|
||||
'only_matching': True,
|
||||
@@ -280,7 +292,27 @@ class ImgurGalleryIE(ImgurGalleryBaseIE):
|
||||
'release_timestamp': 1358554297,
|
||||
'thumbnail': 'https://i.imgur.com/YcAQlkxh.jpg',
|
||||
'release_date': '20130119',
|
||||
'uploader_url': 'https://i.imgur.com/u3R4I2S_d.png?maxwidth=290&fidelity=grand',
|
||||
'uploader_url': 'https://i.imgur.com/N5Flb2v_d.png?maxwidth=290&fidelity=grand',
|
||||
'comment_count': int,
|
||||
'dislike_count': int,
|
||||
'like_count': int,
|
||||
},
|
||||
}, {
|
||||
# Test with slug
|
||||
'url': 'https://imgur.com/gallery/classic-steve-carell-gif-cracks-me-up-everytime-repost-downvotes-YcAQlkx',
|
||||
'add_ies': ['Imgur'],
|
||||
'info_dict': {
|
||||
'id': 'YcAQlkx',
|
||||
'ext': 'mp4',
|
||||
'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
|
||||
'timestamp': 1358554297,
|
||||
'upload_date': '20130119',
|
||||
'uploader_id': '1648642',
|
||||
'uploader': 'wittyusernamehere',
|
||||
'release_timestamp': 1358554297,
|
||||
'release_date': '20130119',
|
||||
'thumbnail': 'https://i.imgur.com/YcAQlkxh.jpg',
|
||||
'uploader_url': 'https://i.imgur.com/N5Flb2v_d.png?maxwidth=290&fidelity=grand',
|
||||
'comment_count': int,
|
||||
'dislike_count': int,
|
||||
'like_count': int,
|
||||
@@ -317,6 +349,13 @@ class ImgurGalleryIE(ImgurGalleryBaseIE):
|
||||
'title': 'Penguins !',
|
||||
},
|
||||
'playlist_count': 3,
|
||||
}, {
|
||||
'url': 'https://imgur.com/t/unmuted/penguins-penguins-6lAn9VQ',
|
||||
'info_dict': {
|
||||
'id': '6lAn9VQ',
|
||||
'title': 'Penguins !',
|
||||
},
|
||||
'playlist_count': 3,
|
||||
}, {
|
||||
'url': 'https://imgur.com/t/unmuted/kx2uD3C',
|
||||
'add_ies': ['Imgur'],
|
||||
@@ -357,7 +396,7 @@ class ImgurGalleryIE(ImgurGalleryBaseIE):
|
||||
|
||||
class ImgurAlbumIE(ImgurGalleryBaseIE):
|
||||
IE_NAME = 'imgur:album'
|
||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)'
|
||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)'
|
||||
_GALLERY = False
|
||||
_TESTS = [{
|
||||
# TODO: only static images - replace with animated/video gallery
|
||||
@@ -372,6 +411,14 @@ class ImgurAlbumIE(ImgurGalleryBaseIE):
|
||||
'title': 'enen-no-shouboutai',
|
||||
},
|
||||
'playlist_count': 2,
|
||||
}, {
|
||||
# Test with URL slug
|
||||
'url': 'https://imgur.com/a/enen-no-shouboutai-iX265HX',
|
||||
'info_dict': {
|
||||
'id': 'iX265HX',
|
||||
'title': 'enen-no-shouboutai',
|
||||
},
|
||||
'playlist_count': 2,
|
||||
}, {
|
||||
'url': 'https://imgur.com/a/8pih2Ed',
|
||||
'info_dict': {
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
from .telecinco import TelecincoIE
|
||||
from .telecinco import TelecincoBaseIE
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
parse_iso8601,
|
||||
)
|
||||
|
||||
|
||||
class MiTeleIE(TelecincoIE): # XXX: Do not subclass from concrete IE
|
||||
class MiTeleIE(TelecincoBaseIE):
|
||||
IE_DESC = 'mitele.es'
|
||||
_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
|
||||
'info_dict': {
|
||||
@@ -27,6 +26,7 @@ class MiTeleIE(TelecincoIE): # XXX: Do not subclass from concrete IE
|
||||
'timestamp': 1471209401,
|
||||
'upload_date': '20160814',
|
||||
},
|
||||
'skip': 'HTTP Error 404 Not Found',
|
||||
}, {
|
||||
# no explicit title
|
||||
'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
|
||||
@@ -49,6 +49,26 @@ class MiTeleIE(TelecincoIE): # XXX: Do not subclass from concrete IE
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'HTTP Error 404 Not Found',
|
||||
}, {
|
||||
'url': 'https://www.mitele.es/programas-tv/horizonte/temporada-5/programa-171-40_013480051/player/',
|
||||
'info_dict': {
|
||||
'id': '7adbe22e-cd41-4787-afa4-36f3da7c2c6f',
|
||||
'ext': 'mp4',
|
||||
'title': 'Horizonte Temporada 5 Programa 171',
|
||||
'description': 'md5:97f1fb712c5ac27e5693a8b3c5c0c6e3',
|
||||
'episode': 'Las Zonas de Bajas Emisiones, a debate',
|
||||
'episode_number': 171,
|
||||
'season': 'Season 5',
|
||||
'season_number': 5,
|
||||
'series': 'Horizonte',
|
||||
'duration': 7012,
|
||||
'upload_date': '20240927',
|
||||
'timestamp': 1727416450,
|
||||
'thumbnail': 'https://album.mediaset.es/eimg/2024/09/27/horizonte-171_9f02.jpg',
|
||||
'age_limit': 12,
|
||||
},
|
||||
'params': {'geo_bypass_country': 'ES'},
|
||||
}, {
|
||||
'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
|
||||
'only_matching': True,
|
||||
|
||||
@@ -371,7 +371,7 @@ class NexxIE(InfoExtractor):
|
||||
# not all videos work via arc, e.g. nexx:741:1269984
|
||||
if not video:
|
||||
# Reverse engineered from JS code (see getDeviceID function)
|
||||
device_id = f'{random.randint(1, 4)}:{int(time.time())}:{random.randint(1e4, 99999)}{random.randint(1, 9)}'
|
||||
device_id = f'{random.randint(1, 4)}:{int(time.time())}:{random.randint(10000, 99999)}{random.randint(1, 9)}'
|
||||
|
||||
result = self._call_api(domain_id, 'session/init', video_id, data={
|
||||
'nxp_devh': device_id,
|
||||
|
||||
@@ -2,7 +2,13 @@ import re
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import js_to_json, str_or_none, traverse_obj
|
||||
from ..networking import HEADRequest
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
js_to_json,
|
||||
str_or_none,
|
||||
)
|
||||
from ..utils.traversal import traverse_obj
|
||||
|
||||
|
||||
class SubstackIE(InfoExtractor):
|
||||
@@ -43,6 +49,19 @@ class SubstackIE(InfoExtractor):
|
||||
'uploader': "Andrew Zimmern's Spilled Milk ",
|
||||
'uploader_id': '577659',
|
||||
},
|
||||
}, {
|
||||
# Podcast that needs its file extension resolved to mp3
|
||||
'url': 'https://persuasion1.substack.com/p/summers',
|
||||
'md5': '1456a755d46084744facdfac9edf900f',
|
||||
'info_dict': {
|
||||
'id': '141970405',
|
||||
'ext': 'mp3',
|
||||
'title': 'Larry Summers on What Went Wrong on Campus',
|
||||
'description': 'Yascha Mounk and Larry Summers also discuss the promise and perils of artificial intelligence.',
|
||||
'thumbnail': r're:https://substackcdn\.com/image/.+\.jpeg',
|
||||
'uploader': 'Persuasion',
|
||||
'uploader_id': '61579',
|
||||
},
|
||||
}]
|
||||
|
||||
@classmethod
|
||||
@@ -89,7 +108,15 @@ class SubstackIE(InfoExtractor):
|
||||
post_type = webpage_info['post']['type']
|
||||
formats, subtitles = [], {}
|
||||
if post_type == 'podcast':
|
||||
formats, subtitles = [{'url': webpage_info['post']['podcast_url']}], {}
|
||||
fmt = {'url': webpage_info['post']['podcast_url']}
|
||||
if not determine_ext(fmt['url'], default_ext=None):
|
||||
# The redirected format URL expires but the original URL doesn't,
|
||||
# so we only want to extract the extension from this request
|
||||
fmt['ext'] = determine_ext(self._request_webpage(
|
||||
HEADRequest(fmt['url']), display_id,
|
||||
'Resolving podcast file extension',
|
||||
'Podcast URL is invalid').url)
|
||||
formats.append(fmt)
|
||||
elif post_type == 'video':
|
||||
formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], canonical_url)
|
||||
else:
|
||||
|
||||
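A hedged sketch of the extension-resolution step added above: determine_ext(..., default_ext=None) reports whether the podcast URL already carries a usable extension, and only when it does not is a HEADRequest issued so the extension can be read off the redirected URL (the URLs below are placeholders, not real Substack endpoints):

    from yt_dlp.utils import determine_ext

    # An extension is already present -> nothing to resolve
    print(determine_ext('https://example.com/podcast/141970405.mp3', default_ext=None))  # -> mp3
    # No usable extension -> None, so the extractor falls back to a HEADRequest
    # and calls determine_ext() again on the URL it was redirected to
    print(determine_ext('https://example.com/podcast/141970405', default_ext=None))      # -> None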
@@ -2,15 +2,69 @@ import json
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..networking.exceptions import HTTPError
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
clean_html,
|
||||
int_or_none,
|
||||
join_nonempty,
|
||||
str_or_none,
|
||||
try_get,
|
||||
traverse_obj,
|
||||
update_url,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
class TelecincoIE(InfoExtractor):
|
||||
class TelecincoBaseIE(InfoExtractor):
|
||||
def _parse_content(self, content, url):
|
||||
video_id = content['dataMediaId']
|
||||
config = self._download_json(
|
||||
content['dataConfig'], video_id, 'Downloading config JSON')
|
||||
services = config['services']
|
||||
caronte = self._download_json(services['caronte'], video_id)
|
||||
if traverse_obj(caronte, ('dls', 0, 'drm', {bool})):
|
||||
self.report_drm(video_id)
|
||||
|
||||
stream = caronte['dls'][0]['stream']
|
||||
headers = {
|
||||
'Referer': url,
|
||||
'Origin': re.match(r'https?://[^/]+', url).group(0),
|
||||
}
|
||||
geo_headers = {**headers, **self.geo_verification_headers()}
|
||||
|
||||
try:
|
||||
cdn = self._download_json(
|
||||
caronte['cerbero'], video_id, data=json.dumps({
|
||||
'bbx': caronte['bbx'],
|
||||
'gbx': self._download_json(services['gbx'], video_id)['gbx'],
|
||||
}).encode(), headers={
|
||||
'Content-Type': 'application/json',
|
||||
**geo_headers,
|
||||
})['tokens']['1']['cdn']
|
||||
except ExtractorError as error:
|
||||
if isinstance(error.cause, HTTPError) and error.cause.status == 403:
|
||||
error_code = traverse_obj(
|
||||
self._webpage_read_content(error.cause.response, caronte['cerbero'], video_id, fatal=False),
|
||||
({json.loads}, 'code', {int}))
|
||||
if error_code == 4038:
|
||||
self.raise_geo_restricted(countries=['ES'])
|
||||
raise
|
||||
|
||||
formats = self._extract_m3u8_formats(
|
||||
update_url(stream, query=cdn), video_id, 'mp4', m3u8_id='hls', headers=geo_headers)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': traverse_obj(config, ('info', 'title', {str})),
|
||||
'formats': formats,
|
||||
'thumbnail': (traverse_obj(content, ('dataPoster', {url_or_none}))
|
||||
or traverse_obj(config, 'poster', 'imageUrl', expected_type=url_or_none)),
|
||||
'duration': traverse_obj(content, ('dataDuration', {int_or_none})),
|
||||
'http_headers': headers,
|
||||
}
|
||||
|
||||
|
||||
class TelecincoIE(TelecincoBaseIE):
|
||||
IE_DESC = 'telecinco.es, cuatro.com and mediaset.es'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html'
|
||||
|
||||
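The refactored _parse_content now assembles the tokenised manifest URL with update_url rather than naive string concatenation; a small illustration with made-up values:

    from yt_dlp.utils import update_url

    stream = 'https://example-cdn.mediaset.es/master.m3u8'  # placeholder manifest URL
    cdn = 'token=abc123'                                    # placeholder cerbero token
    print(update_url(stream, query=cdn))  # -> https://example-cdn.mediaset.es/master.m3u8?token=abc123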
@@ -30,6 +84,7 @@ class TelecincoIE(InfoExtractor):
|
||||
'duration': 662,
|
||||
},
|
||||
}],
|
||||
'skip': 'HTTP Error 410 Gone',
|
||||
}, {
|
||||
'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html',
|
||||
'md5': 'c86fe0d99e3bdb46b7950d38bf6ef12a',
|
||||
@@ -40,23 +95,24 @@ class TelecincoIE(InfoExtractor):
|
||||
'description': 'md5:a62ecb5f1934fc787107d7b9a2262805',
|
||||
'duration': 79,
|
||||
},
|
||||
'skip': 'Redirects to main page',
|
||||
}, {
|
||||
'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html',
|
||||
'md5': 'eddb50291df704ce23c74821b995bcac',
|
||||
'md5': '5ce057f43f30b634fbaf0f18c71a140a',
|
||||
'info_dict': {
|
||||
'id': 'aywerkD2Sv1vGNqq9b85Q2',
|
||||
'ext': 'mp4',
|
||||
'title': '#DOYLACARA. Con la trata no hay trato',
|
||||
'description': 'md5:2771356ff7bfad9179c5f5cd954f1477',
|
||||
'duration': 50,
|
||||
'thumbnail': 'https://album.mediaset.es/eimg/2017/11/02/1tlQLO5Q3mtKT24f3EaC24.jpg',
|
||||
},
|
||||
}, {
|
||||
# video in opening's content
|
||||
'url': 'https://www.telecinco.es/vivalavida/fiorella-sobrina-edmundo-arrocet-entrevista_18_2907195140.html',
|
||||
'info_dict': {
|
||||
'id': '2907195140',
|
||||
'id': '1691427',
|
||||
'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"',
|
||||
'description': 'md5:73f340a7320143d37ab895375b2bf13a',
|
||||
'description': r're:Fiorella, la sobrina de Edmundo Arrocet, concedió .{727}',
|
||||
},
|
||||
'playlist': [{
|
||||
'md5': 'adb28c37238b675dad0f042292f209a7',
|
||||
@@ -65,6 +121,7 @@ class TelecincoIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"',
|
||||
'duration': 1015,
|
||||
'thumbnail': 'https://album.mediaset.es/eimg/2020/02/29/5opaC37lUhKlZ7FoDhiVC.jpg',
|
||||
},
|
||||
}],
|
||||
'params': {
|
||||
@@ -81,66 +138,29 @@ class TelecincoIE(InfoExtractor):
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _parse_content(self, content, url):
|
||||
video_id = content['dataMediaId']
|
||||
config = self._download_json(
|
||||
content['dataConfig'], video_id, 'Downloading config JSON')
|
||||
title = config['info']['title']
|
||||
services = config['services']
|
||||
caronte = self._download_json(services['caronte'], video_id)
|
||||
stream = caronte['dls'][0]['stream']
|
||||
headers = self.geo_verification_headers()
|
||||
headers.update({
|
||||
'Content-Type': 'application/json;charset=UTF-8',
|
||||
'Origin': re.match(r'https?://[^/]+', url).group(0),
|
||||
})
|
||||
cdn = self._download_json(
|
||||
caronte['cerbero'], video_id, data=json.dumps({
|
||||
'bbx': caronte['bbx'],
|
||||
'gbx': self._download_json(services['gbx'], video_id)['gbx'],
|
||||
}).encode(), headers=headers)['tokens']['1']['cdn']
|
||||
formats = self._extract_m3u8_formats(
|
||||
stream + '?' + cdn, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnail': content.get('dataPoster') or config.get('poster', {}).get('imageUrl'),
|
||||
'duration': int_or_none(content.get('dataDuration')),
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
article = self._parse_json(self._search_regex(
|
||||
r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=\s*({.+})',
|
||||
webpage, 'article'), display_id)['article']
|
||||
title = article.get('title')
|
||||
description = clean_html(article.get('leadParagraph')) or ''
|
||||
article = self._search_json(
|
||||
r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=',
|
||||
webpage, 'article', display_id)['article']
|
||||
description = traverse_obj(article, ('leadParagraph', {clean_html}, filter))
|
||||
|
||||
if article.get('editorialType') != 'VID':
|
||||
entries = []
|
||||
body = [article.get('opening')]
|
||||
body.extend(try_get(article, lambda x: x['body'], list) or [])
|
||||
for p in body:
|
||||
if not isinstance(p, dict):
|
||||
continue
|
||||
content = p.get('content')
|
||||
if not content:
|
||||
continue
|
||||
|
||||
for p in traverse_obj(article, ((('opening', all), 'body'), lambda _, v: v['content'])):
|
||||
content = p['content']
|
||||
type_ = p.get('type')
|
||||
if type_ == 'paragraph':
|
||||
content_str = str_or_none(content)
|
||||
if content_str:
|
||||
description += content_str
|
||||
continue
|
||||
if type_ == 'video' and isinstance(content, dict):
|
||||
if type_ == 'paragraph' and isinstance(content, str):
|
||||
description = join_nonempty(description, content, delim='')
|
||||
elif type_ == 'video' and isinstance(content, dict):
|
||||
entries.append(self._parse_content(content, url))
|
||||
|
||||
return self.playlist_result(
|
||||
entries, str_or_none(article.get('id')), title, description)
|
||||
content = article['opening']['content']
|
||||
info = self._parse_content(content, url)
|
||||
info.update({
|
||||
'description': description,
|
||||
})
|
||||
entries, str_or_none(article.get('id')),
|
||||
traverse_obj(article, ('title', {str})), clean_html(description))
|
||||
|
||||
info = self._parse_content(article['opening']['content'], url)
|
||||
info['description'] = description
|
||||
return info
|
||||
|
||||
@@ -236,7 +236,7 @@ class TubeTuGrazSeriesIE(TubeTuGrazBaseIE):
|
||||
},
|
||||
},
|
||||
],
|
||||
'min_playlist_count': 4,
|
||||
'playlist_mincount': 4,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -6,6 +6,7 @@ from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
js_to_json,
|
||||
strip_or_none,
|
||||
traverse_obj,
|
||||
url_or_none,
|
||||
urlencode_postdata,
|
||||
@@ -132,12 +133,12 @@ class TubiTvIE(InfoExtractor):
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'title': strip_or_none(title),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'season_number': int_or_none(season_number),
|
||||
'episode_number': int_or_none(episode_number),
|
||||
'episode': episode_title,
|
||||
'episode': strip_or_none(episode_title),
|
||||
**traverse_obj(video_data, {
|
||||
'description': ('description', {str}),
|
||||
'duration': ('duration', {int_or_none}),
|
||||
|
||||
@@ -934,14 +934,13 @@ class TwitterIE(TwitterBaseIE):
|
||||
'uploader_id': 'MoniqueCamarra',
|
||||
'live_status': 'was_live',
|
||||
'release_timestamp': 1658417414,
|
||||
'description': 'md5:acce559345fd49f129c20dbcda3f1201',
|
||||
'description': r're:Twitter Space participated by Sergej Sumlenny.+',
|
||||
'timestamp': 1658407771,
|
||||
'release_date': '20220721',
|
||||
'upload_date': '20220721',
|
||||
},
|
||||
'add_ie': ['TwitterSpaces'],
|
||||
'params': {'skip_download': 'm3u8'},
|
||||
'skip': 'Requires authentication',
|
||||
}, {
|
||||
# URL specifies video number but --yes-playlist
|
||||
'url': 'https://twitter.com/CTVJLaidlaw/status/1600649710662213632/video/1',
|
||||
@@ -1856,8 +1855,6 @@ class TwitterSpacesIE(TwitterBaseIE):
|
||||
|
||||
def _real_extract(self, url):
|
||||
space_id = self._match_id(url)
|
||||
if not self.is_logged_in:
|
||||
self.raise_login_required('Twitter Spaces require authentication')
|
||||
space_data = self._call_graphql_api('HPEisOmj1epUNLCWTYhUWw/AudioSpaceById', space_id)['audioSpace']
|
||||
if not space_data:
|
||||
raise ExtractorError('Twitter Space not found', expected=True)
|
||||
|
||||
@@ -73,7 +73,7 @@ class UstreamIE(InfoExtractor):
|
||||
def num_to_hex(n):
|
||||
return hex(n)[2:]
|
||||
|
||||
rnd = random.randrange
|
||||
rnd = lambda x: random.randrange(int(x))
|
||||
|
||||
if not extra_note:
|
||||
extra_note = ''
|
||||
|
||||
@@ -8,7 +8,8 @@ from ..utils import (
|
||||
int_or_none,
|
||||
parse_duration,
|
||||
qualities,
|
||||
try_get,
|
||||
remove_start,
|
||||
strip_or_none,
|
||||
)
|
||||
|
||||
|
||||
@@ -108,7 +109,7 @@ class VeohIE(InfoExtractor):
|
||||
|
||||
categories = metadata.get('categoryPath')
|
||||
if not categories:
|
||||
category = try_get(video, lambda x: x['category'].strip().removeprefix('category_'))
|
||||
category = remove_start(strip_or_none(video.get('category')), 'category_')
|
||||
categories = [category] if category else None
|
||||
tags = video.get('tags')
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ import urllib.parse
|
||||
from .common import InfoExtractor, SearchInfoExtractor
|
||||
from .openload import PhantomJSwrapper
|
||||
from ..jsinterp import JSInterpreter
|
||||
from ..networking.exceptions import HTTPError, network_exceptions
|
||||
from ..networking.exceptions import HTTPError, TransportError, network_exceptions
|
||||
from ..utils import (
|
||||
NO_DEFAULT,
|
||||
ExtractorError,
|
||||
@@ -55,6 +55,7 @@ from ..utils import (
|
||||
str_or_none,
|
||||
str_to_int,
|
||||
strftime_or_none,
|
||||
time_seconds,
|
||||
traverse_obj,
|
||||
try_call,
|
||||
try_get,
|
||||
@@ -114,6 +115,7 @@ INNERTUBE_CLIENTS = {
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'web_creator': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
@@ -153,6 +155,7 @@ INNERTUBE_CLIENTS = {
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_PO_TOKEN': True,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'android_creator': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
@@ -200,21 +203,6 @@ INNERTUBE_CLIENTS = {
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'PLAYER_PARAMS': '2AMB',
|
||||
},
|
||||
# This client only has legacy formats and storyboards
|
||||
'android_producer': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID_PRODUCER',
|
||||
'clientVersion': '0.111.1',
|
||||
'androidSdkVersion': 30,
|
||||
'userAgent': 'com.google.android.apps.youtube.producer/0.111.1 (Linux; U; Android 11) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '11',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 91,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
# iOS clients have HLS live streams. Setting device model to get 60fps formats.
|
||||
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
|
||||
'ios': {
|
||||
@@ -247,6 +235,7 @@ INNERTUBE_CLIENTS = {
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'ios_creator': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
@@ -282,8 +271,9 @@ INNERTUBE_CLIENTS = {
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
|
||||
},
|
||||
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
|
||||
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
|
||||
# This client now requires sign-in for every video
|
||||
# It was previously an age-gate workaround for videos that were `playable_in_embed`
|
||||
# It may still be useful if signed into an EU account that is not age-verified
|
||||
'tv_embedded': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
@@ -526,6 +516,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
|
||||
_YT_HANDLE_RE = r'@[\w.-]{3,30}' # https://support.google.com/youtube/answer/11585688?hl=en
|
||||
_YT_CHANNEL_UCID_RE = r'UC[\w-]{22}'
|
||||
|
||||
_NETRC_MACHINE = 'youtube'
|
||||
|
||||
def ucid_or_none(self, ucid):
|
||||
return self._search_regex(rf'^({self._YT_CHANNEL_UCID_RE})$', ucid, 'UC-id', default=None)
|
||||
|
||||
@@ -584,9 +576,213 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
|
||||
self._initialize_consent()
|
||||
self._check_login_required()
|
||||
|
||||
def _perform_login(self, username, password):
|
||||
auth_type, _, user = (username or '').partition('+')
|
||||
|
||||
if auth_type != 'oauth':
|
||||
raise ExtractorError(self._youtube_login_hint, expected=True)
|
||||
|
||||
self._initialize_oauth(user, password)
|
||||
|
||||
'''
OAuth 2.0 Device Authorization Grant flow, used by the YouTube TV client (youtube.com/tv).

For more information regarding OAuth 2.0 and the Device Authorization Grant flow in general, see:
- https://developers.google.com/identity/protocols/oauth2/limited-input-device
- https://accounts.google.com/.well-known/openid-configuration
- https://www.rfc-editor.org/rfc/rfc8628
- https://www.rfc-editor.org/rfc/rfc6749

Note: The official client appears to use a proxied version of the oauth2 endpoints on youtube.com/o/oauth2,
which applies some modifications to the response (such as returning errors as 200 OK).
Since the client works with the standard API, we will use that as it is well-documented.
'''
|
||||
|
||||
_OAUTH_PROFILE = None
|
||||
_OAUTH_ACCESS_TOKEN_CACHE = {}
|
||||
_OAUTH_DISPLAY_ID = 'oauth'
|
||||
|
||||
# YouTube TV (TVHTML5) client. You can find these at youtube.com/tv
|
||||
_OAUTH_CLIENT_ID = '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com'
|
||||
_OAUTH_CLIENT_SECRET = 'SboVhoG9s0rNafixCSGGKXAT'
|
||||
_OAUTH_SCOPE = 'http://gdata.youtube.com https://www.googleapis.com/auth/youtube-paid-content'
|
||||
|
||||
# From https://accounts.google.com/.well-known/openid-configuration
|
||||
# Technically, these should be fetched dynamically and not hard-coded.
|
||||
# However, as these endpoints rarely change, we can risk saving an extra request for every invocation.
|
||||
_OAUTH_DEVICE_AUTHORIZATION_ENDPOINT = 'https://oauth2.googleapis.com/device/code'
|
||||
_OAUTH_TOKEN_ENDPOINT = 'https://oauth2.googleapis.com/token'
|
||||
|
||||
@property
|
||||
def _oauth_cache_key(self):
|
||||
return f'oauth_refresh_token_{self._OAUTH_PROFILE}'
|
||||
|
||||
def _read_oauth_error_response(self, response):
|
||||
return traverse_obj(
|
||||
self._webpage_read_content(response, self._OAUTH_TOKEN_ENDPOINT, self._OAUTH_DISPLAY_ID, fatal=False),
|
||||
({json.loads}, 'error', {str}))
|
||||
|
||||
def _set_oauth_info(self, token_response):
|
||||
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE.setdefault(self._OAUTH_PROFILE, {}).update({
|
||||
'access_token': token_response['access_token'],
|
||||
'token_type': token_response['token_type'],
|
||||
'expiry': time_seconds(
|
||||
seconds=traverse_obj(token_response, ('expires_in', {float_or_none}), default=300) - 10),
|
||||
})
|
||||
refresh_token = traverse_obj(token_response, ('refresh_token', {str}))
|
||||
if refresh_token:
|
||||
self.cache.store(self._NETRC_MACHINE, self._oauth_cache_key, refresh_token)
|
||||
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE[self._OAUTH_PROFILE]['refresh_token'] = refresh_token
|
||||
|
||||
def _initialize_oauth(self, user, refresh_token):
self._OAUTH_PROFILE = user or 'default'

if self._OAUTH_PROFILE in YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE:
self.write_debug(f'{self._OAUTH_DISPLAY_ID}: Using cached access token for profile "{self._OAUTH_PROFILE}"')
return

YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE[self._OAUTH_PROFILE] = {}

if refresh_token:
refresh_token = refresh_token.strip('\'') or None

# Allow refresh token passed to initialize cache
if refresh_token:
self.cache.store(self._NETRC_MACHINE, self._oauth_cache_key, refresh_token)

refresh_token = refresh_token or self.cache.load(self._NETRC_MACHINE, self._oauth_cache_key)
if refresh_token:
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE[self._OAUTH_PROFILE]['refresh_token'] = refresh_token
try:
token_response = self._refresh_token(refresh_token)
except ExtractorError as e:
error_msg = str(e.orig_msg).replace('Failed to refresh access token: ', '')
self.report_warning(f'{self._OAUTH_DISPLAY_ID}: Failed to refresh access token: {error_msg}')
token_response = self._oauth_authorize
else:
token_response = self._oauth_authorize

self._set_oauth_info(token_response)
self.write_debug(f'{self._OAUTH_DISPLAY_ID}: Logged in using profile "{self._OAUTH_PROFILE}"')

def _refresh_token(self, refresh_token):
try:
token_response = self._download_json(
self._OAUTH_TOKEN_ENDPOINT,
video_id=self._OAUTH_DISPLAY_ID,
note='Refreshing access token',
data=json.dumps({
'client_id': self._OAUTH_CLIENT_ID,
'client_secret': self._OAUTH_CLIENT_SECRET,
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}).encode(),
headers={'Content-Type': 'application/json'})
except ExtractorError as e:
if isinstance(e.cause, HTTPError):
error = self._read_oauth_error_response(e.cause.response)
if error == 'invalid_grant':
# RFC6749 § 5.2
raise ExtractorError(
'Failed to refresh access token: Refresh token is invalid, revoked, or expired (invalid_grant)',
expected=True, video_id=self._OAUTH_DISPLAY_ID)
raise ExtractorError(
f'Failed to refresh access token: Authorization server returned error {error}',
video_id=self._OAUTH_DISPLAY_ID)
raise
return token_response

@property
def _oauth_authorize(self):
code_response = self._download_json(
self._OAUTH_DEVICE_AUTHORIZATION_ENDPOINT,
video_id=self._OAUTH_DISPLAY_ID,
note='Initializing authorization flow',
data=json.dumps({
'client_id': self._OAUTH_CLIENT_ID,
'scope': self._OAUTH_SCOPE,
}).encode(),
headers={'Content-Type': 'application/json'})

verification_url = traverse_obj(code_response, ('verification_url', {str}))
user_code = traverse_obj(code_response, ('user_code', {str}))
if not verification_url or not user_code:
raise ExtractorError(
'Authorization server did not provide verification_url or user_code', video_id=self._OAUTH_DISPLAY_ID)

# note: The whitespace is intentional
self.to_screen(
f'{self._OAUTH_DISPLAY_ID}: To give yt-dlp access to your account, '
f'go to {verification_url} and enter code {user_code}')

# RFC8628 § 3.5: default poll interval is 5 seconds if not provided
poll_interval = traverse_obj(code_response, ('interval', {int}), default=5)

for retry in self.RetryManager():
while True:
try:
token_response = self._download_json(
self._OAUTH_TOKEN_ENDPOINT,
video_id=self._OAUTH_DISPLAY_ID,
note=False,
errnote='Failed to request access token',
data=json.dumps({
'client_id': self._OAUTH_CLIENT_ID,
'client_secret': self._OAUTH_CLIENT_SECRET,
'device_code': code_response['device_code'],
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
}).encode(),
headers={'Content-Type': 'application/json'})
except ExtractorError as e:
if isinstance(e.cause, TransportError):
retry.error = e
break
elif isinstance(e.cause, HTTPError):
error = self._read_oauth_error_response(e.cause.response)
if not error:
retry.error = e
break

if error == 'authorization_pending':
time.sleep(poll_interval)
continue
elif error == 'expired_token':
raise ExtractorError(
'Authorization timed out', expected=True, video_id=self._OAUTH_DISPLAY_ID)
elif error == 'access_denied':
raise ExtractorError(
'You denied access to an account', expected=True, video_id=self._OAUTH_DISPLAY_ID)
elif error == 'slow_down':
# RFC8628 § 3.5: add 5 seconds to the poll interval
poll_interval += 5
time.sleep(poll_interval)
continue
else:
raise ExtractorError(
f'Authorization server returned an error when fetching access token: {error}',
video_id=self._OAUTH_DISPLAY_ID)
raise

return token_response

def _update_oauth(self):
token = YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE.get(self._OAUTH_PROFILE)
if token is None or token['expiry'] > time.time():
return

self._set_oauth_info(self._refresh_token(token['refresh_token']))

@property
def _youtube_login_hint(self):
return ('Use --username=oauth[+PROFILE] --password="" to log in using oauth, '
f'or else u{self._login_hint(method="cookies")[1:]}. '
'See https://github.com/yt-dlp/yt-dlp/wiki/Extractors#logging-in-with-oauth for more on how to use oauth. '
'See https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for help with cookies')

def _check_login_required(self):
if self._LOGIN_REQUIRED and not self._cookies_passed:
self.raise_login_required('Login details are needed to download this content', method='cookies')
if self._LOGIN_REQUIRED and not self.is_authenticated:
self.raise_login_required(
f'Login details are needed to download this content. {self._youtube_login_hint}', method=None)

_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*='
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*='
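For orientation, the device authorization grant implemented above reduces to two calls against the Google OAuth endpoints: one to obtain a device/user code pair, and one that polls the token endpoint until the user approves. A minimal standalone sketch of that flow (assuming the third-party `requests` library and the TVHTML5 client credentials defined above; the extractor's `_download_json`-based code remains the authoritative implementation):

import time
import requests  # assumption: requests is available; the extractor itself uses _download_json

def device_flow(client_id, client_secret, scope):
    # Step 1: request a device_code/user_code pair (RFC 8628 § 3.1-3.2)
    code = requests.post('https://oauth2.googleapis.com/device/code',
                         json={'client_id': client_id, 'scope': scope}).json()
    print(f'Go to {code["verification_url"]} and enter code {code["user_code"]}')
    interval = code.get('interval', 5)  # RFC 8628 § 3.5: default poll interval is 5 seconds
    # Step 2: poll the token endpoint until the user approves or the code expires
    while True:
        resp = requests.post('https://oauth2.googleapis.com/token', json={
            'client_id': client_id,
            'client_secret': client_secret,
            'device_code': code['device_code'],
            'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
        })
        payload = resp.json()
        if resp.ok:
            return payload  # access_token, token_type, expires_in, refresh_token, ...
        error = payload.get('error')
        if error == 'authorization_pending':
            time.sleep(interval)
        elif error == 'slow_down':
            interval += 5  # RFC 8628 § 3.5
            time.sleep(interval)
        else:
            raise RuntimeError(f'Device authorization failed: {error}')

The extractor additionally wraps the polling loop in its RetryManager and caches the resulting refresh token under the active oauth profile, as shown in _initialize_oauth above.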
@@ -685,17 +881,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if session_index is not None:
return session_index

# Deprecated?
def _extract_identity_token(self, ytcfg=None, webpage=None):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], str)
if token:
return token
if webpage:
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None, fatal=False)

def _data_sync_id_to_delegated_session_id(self, data_sync_id):
if not data_sync_id:
return
@@ -742,7 +927,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

@functools.cached_property
def is_authenticated(self):
return bool(self._generate_sapisidhash_header())
return self._OAUTH_PROFILE or bool(self._generate_sapisidhash_header())

def extract_ytcfg(self, video_id, webpage):
if not webpage:
@@ -752,21 +937,21 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}

def generate_api_headers(
self, *, ytcfg=None, account_syncid=None, session_index=None,
visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
def _generate_oauth_headers(self):
self._update_oauth()
oauth_token = YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE.get(self._OAUTH_PROFILE)
if not oauth_token:
return {}

origin = 'https://' + (self._select_api_hostname(api_hostname, default_client))
headers = {
'X-YouTube-Client-Name': str(
self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
'Origin': origin,
'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg),
'User-Agent': self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT']['client']['userAgent'], default_client=default_client),
return {
'Authorization': f'{oauth_token["token_type"]} {oauth_token["access_token"]}',
}

def _generate_cookie_auth_headers(self, *, ytcfg=None, account_syncid=None, session_index=None, origin=None, **kwargs):
headers = {}
account_syncid = account_syncid or self._extract_account_syncid(ytcfg)
if account_syncid:
headers['X-Goog-PageId'] = account_syncid
if session_index is None:
session_index = self._extract_session_index(ytcfg)
if account_syncid or session_index is not None:
@@ -776,8 +961,29 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin

return headers

def generate_api_headers(
self, *, ytcfg=None, account_syncid=None, session_index=None,
visitor_data=None, api_hostname=None, default_client='web', **kwargs):

origin = 'https://' + (self._select_api_hostname(api_hostname, default_client))
headers = {
'X-YouTube-Client-Name': str(
self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
'Origin': origin,
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg),
'User-Agent': self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT']['client']['userAgent'], default_client=default_client),
**self._generate_oauth_headers(),
**self._generate_cookie_auth_headers(ytcfg=ytcfg, account_syncid=account_syncid, session_index=session_index, origin=origin),
}
return filter_dict(headers)
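With an active OAuth profile, the merged result of generate_api_headers() would look roughly like the dict below (all values are illustrative placeholders rather than output from a real session; 'Bearer' is the token_type Google issues for this flow):

# Purely illustrative values showing how the OAuth and cookie-auth headers are merged
headers = {
    'X-YouTube-Client-Name': '1',
    'X-YouTube-Client-Version': '2.20241022.01.00',
    'Origin': 'https://www.youtube.com',
    'X-Goog-Visitor-Id': 'CgtWaXNpdG9ySWQ...',
    'User-Agent': 'Mozilla/5.0 (...)',
    'Authorization': 'Bearer ya29.a0Ae...',
}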

def _generate_webpage_headers(self):
return self._generate_oauth_headers()

def _download_ytcfg(self, client, video_id):
url = {
'web': 'https://www.youtube.com',
@@ -787,7 +993,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if not url:
return {}
webpage = self._download_webpage(
url, video_id, fatal=False, note=f'Downloading {client.replace("_", " ").strip()} client config')
url, video_id, fatal=False, note=f'Downloading {client.replace("_", " ").strip()} client config',
headers=self._generate_webpage_headers())
return self.extract_ytcfg(video_id, webpage) or {}

@staticmethod
@@ -1525,6 +1732,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'heatmap': 'count:100',
'timestamp': 1401991663,
},
'skip': 'Age-restricted; requires authentication',
},
{
'note': 'Age-gate video with embed allowed in public site',
@@ -1555,6 +1763,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'comment_count': int,
'channel_is_verified': True,
},
'skip': 'Age-restricted; requires authentication',
},
{
'note': 'Age-gate video embedable only with clientScreen=EMBED',
@@ -1585,6 +1794,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader_id': '@ProjektMelody',
'timestamp': 1577508724,
},
'skip': 'Age-restricted; requires authentication',
},
{
'note': 'Non-Agegated non-embeddable video',
@@ -2356,6 +2566,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'channel_is_verified': True,
'timestamp': 1405513526,
},
'skip': 'Age-restricted; requires authentication',
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
@@ -2726,6 +2937,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'timestamp': 1577508724,
},
'params': {'extractor_args': {'youtube': {'player_client': ['tv_embedded']}}, 'format': '251-drc'},
'skip': 'Age-restricted; requires authentication',
},
{
'url': 'https://www.youtube.com/live/qVv6vCqciTM',
@@ -3047,7 +3259,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote=f'Download of {player_url} failed')
errnote=f'Download of {player_url} failed',
headers=self._generate_webpage_headers())
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
@@ -3330,7 +3543,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

self._download_webpage(
url, video_id, f'Marking {label}watched',
'Unable to mark watched', fatal=False)
'Unable to mark watched', fatal=False,
headers=self._generate_webpage_headers())

@classmethod
def _extract_from_webpage(cls, url, webpage):
@@ -3953,26 +4167,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
prs.append(pr)

# tv_embedded can work around age-gate and age-verification IF the video is embeddable
if self._is_agegated(pr) and variant != 'tv_embedded':
append_client(f'tv_embedded.{base_client}')

# Unauthenticated users will only get tv_embedded client formats if age-gated
if self._is_agegated(pr) and not self.is_authenticated:
self.to_screen(
f'{video_id}: This video is age-restricted; some formats may be missing '
f'without authentication. {self._login_hint()}', only_once=True)

# EU countries require age-verification for accounts to access age-restricted videos
# If account is not age-verified, _is_agegated() will be truthy for non-embedded clients
# If embedding is disabled for the video, _is_unplayable() will be truthy for tv_embedded
embedding_is_disabled = variant == 'tv_embedded' and self._is_unplayable(pr)
if self.is_authenticated and (self._is_agegated(pr) or embedding_is_disabled):
if self.is_authenticated and self._is_agegated(pr):
self.to_screen(
f'{video_id}: This video is age-restricted and YouTube is requiring '
'account age-verification; some formats may be missing', only_once=True)
# web_creator and mediaconnect can work around the age-verification requirement
# _producer, _testsuite, & _vr variants can also work around age-verification
# _testsuite & _vr variants can also work around age-verification
# tv_embedded may(?) still work around age-verification if the video is embeddable
append_client('web_creator', 'mediaconnect')

prs.extend(deprioritized_prs)
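Authenticated users can still steer client selection explicitly through extractor arguments, as the tv_embedded test case earlier in this diff does. A hypothetical programmatic equivalent (VIDEO_ID is a placeholder):

from yt_dlp import YoutubeDL

# Hypothetical example: force the tv_embedded player client, mirroring the
# 'params' used by the age-restricted test case above
opts = {'extractor_args': {'youtube': {'player_client': ['tv_embedded']}}}
with YoutubeDL(opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=VIDEO_ID', download=False)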
@@ -4322,7 +4525,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if pp:
query['pp'] = pp
webpage = self._download_webpage(
webpage_url, video_id, fatal=False, query=query)
webpage_url, video_id, fatal=False, query=query, headers=self._generate_webpage_headers())

master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()

@@ -4701,11 +4904,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')

COMMENTS_SECTION_IDS = ('comment-item-section', 'engagement-panel-comments-section')
info['comment_count'] = traverse_obj(initial_data, (
'contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'itemSectionRenderer',
'contents', ..., 'commentsEntryPointHeaderRenderer', 'commentCount',
), (
'engagementPanels', lambda _, v: v['engagementPanelSectionListRenderer']['panelIdentifier'] == 'comment-item-section',
'engagementPanels', lambda _, v: v['engagementPanelSectionListRenderer']['panelIdentifier'] in COMMENTS_SECTION_IDS,
'engagementPanelSectionListRenderer', 'header', 'engagementPanelTitleHeaderRenderer', 'contextualInfo',
), expected_type=self._get_count, get_all=False)

@@ -5609,7 +5813,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
webpage, data = None, None
for retry in self.RetryManager(fatal=fatal):
try:
webpage = self._download_webpage(url, item_id, note='Downloading webpage')
webpage = self._download_webpage(url, item_id, note='Downloading webpage', headers=self._generate_webpage_headers())
data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
@@ -6983,7 +7187,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
raise ExtractorError('Unable to recognize tab page')


class YoutubePlaylistIE(InfoExtractor):
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
@@ -7097,7 +7301,7 @@ class YoutubePlaylistIE(InfoExtractor):
return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)


class YoutubeYtBeIE(InfoExtractor):
class YoutubeYtBeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'youtu.be'
_VALID_URL = rf'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{{11}})/*?.*?\blist=(?P<playlist_id>{YoutubeBaseInfoExtractor._PLAYLIST_ID_RE})'
_TESTS = [{
@@ -7148,7 +7352,7 @@ class YoutubeYtBeIE(InfoExtractor):
}), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)


class YoutubeLivestreamEmbedIE(InfoExtractor):
class YoutubeLivestreamEmbedIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube livestream embeds'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
_TESTS = [{
@@ -7163,7 +7367,7 @@ class YoutubeLivestreamEmbedIE(InfoExtractor):
ie=YoutubeTabIE.ie_key(), video_id=channel_id)


class YoutubeYtUserIE(InfoExtractor):
class YoutubeYtUserIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube user videos; "ytuser:" prefix'
IE_NAME = 'youtube:user'
_VALID_URL = r'ytuser:(?P<id>.+)'
@@ -7450,7 +7654,7 @@ class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)


class YoutubeFeedsInfoExtractor(InfoExtractor):
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must re-define the _FEED_NAME property.
@@ -7458,9 +7662,6 @@ class YoutubeFeedsInfoExtractor(InfoExtractor):
_LOGIN_REQUIRED = True
_FEED_NAME = 'feeds'

def _real_initialize(self):
YoutubeBaseInfoExtractor._check_login_required(self)

@classproperty
def IE_NAME(cls):
return f'youtube:{cls._FEED_NAME}'
@@ -7470,7 +7671,7 @@ class YoutubeFeedsInfoExtractor(InfoExtractor):
f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())


class YoutubeWatchLaterIE(InfoExtractor):
class YoutubeWatchLaterIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
_VALID_URL = r':ytwatchlater'
@@ -7524,7 +7725,7 @@ class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
}]


class YoutubeShortsAudioPivotIE(InfoExtractor):
class YoutubeShortsAudioPivotIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube Shorts audio pivot (Shorts using audio of a given video)'
IE_NAME = 'youtube:shorts:pivot:audio'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/source/(?P<id>[\w-]{11})/shorts'
@@ -7548,7 +7749,7 @@ class YoutubeShortsAudioPivotIE(InfoExtractor):
ie=YoutubeTabIE)


class YoutubeTruncatedURLIE(InfoExtractor):
class YoutubeTruncatedURLIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
@@ -7591,9 +7792,9 @@ class YoutubeTruncatedURLIE(InfoExtractor):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'like yt-dlp '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
' or simply yt-dlp BaW_jenozKc .',
expected=True)


@@ -7707,7 +7908,7 @@ class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
return self.url_result(redirect_url)


class YoutubeTruncatedIDIE(InfoExtractor):
class YoutubeTruncatedIDIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'

@@ -408,6 +408,14 @@ def create_parser():
help=(
'Location of the main configuration file; either the path to the config or its containing directory '
'("-" for stdin). Can be used multiple times and inside other configuration files'))
general.add_option(
'--plugin-dirs',
dest='plugin_dirs', metavar='PATH', action='append',
help=(
'Path to an additional directory to search for plugins. '
'This option can be used multiple times to add multiple directories. '
'Note that this currently only works for extractor plugins; '
'postprocessor plugins can only be loaded from the default plugin directories'))
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist', default=False,
@@ -623,13 +631,13 @@ def create_parser():
metavar='DATE', dest='datebefore', default=None,
help=(
'Download only videos uploaded on or before this date. '
'The date formats accepted is the same as --date'))
'The date formats accepted are the same as --date'))
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help=(
'Download only videos uploaded on or after this date. '
'The date formats accepted is the same as --date'))
'The date formats accepted are the same as --date'))
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
@@ -825,7 +833,7 @@ def create_parser():
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help=(
'Prefer video formats with free containers over non-free ones of same quality. '
'Prefer video formats with free containers over non-free ones of the same quality. '
'Use with "-S ext" to strictly prefer free containers irrespective of quality'))
video_format.add_option(
'--no-prefer-free-formats',
@@ -899,13 +907,14 @@ def create_parser():
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format; accepts formats preference, e.g. "srt" or "ass/srt/best"')
help='Subtitle format; accepts formats preference separated by "/", e.g. "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-langs', '--srt-langs',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_list_from_options_callback,
help=(
'Languages of the subtitles to download (can be regex) or "all" separated by commas, e.g. --sub-langs "en.*,ja". '
'Languages of the subtitles to download (can be regex) or "all" separated by commas, e.g. --sub-langs "en.*,ja" '
'(where "en.*" is a regex pattern that matches "en" followed by 0 or more of any character). '
'You can prefix the language code with a "-" to exclude it from the requested languages, e.g. --sub-langs all,-live_chat. '
'Use --list-subs for a list of available language tags'))

@@ -1174,7 +1183,7 @@ def create_parser():
'--print-to-file',
metavar='[WHEN:]TEMPLATE FILE', dest='print_to_file', nargs=2, **when_prefix('video'),
help=(
'Append given template to the file. The values of WHEN and TEMPLATE are same as that of --print. '
'Append given template to the file. The values of WHEN and TEMPLATE are the same as that of --print. '
'FILE uses the same syntax as the output template. This option can be used multiple times'))
verbosity.add_option(
'-g', '--get-url',
@@ -1218,7 +1227,7 @@ def create_parser():
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help=(
'Quiet, but print JSON information for each url or infojson passed. Simulate unless --no-simulate is used. '
'Quiet, but print JSON information for each URL or infojson passed. Simulate unless --no-simulate is used. '
'If the URL refers to a playlist, the whole playlist information is dumped in a single line'))
verbosity.add_option(
'--print-json',
@@ -1562,7 +1571,7 @@ def create_parser():
help=(
'Remux the video into another container if necessary '
f'(currently supported: {", ".join(FFmpegVideoRemuxerPP.SUPPORTED_EXTS)}). '
'If target container does not support the video/audio codec, remuxing will fail. You can specify multiple rules; '
'If the target container does not support the video/audio codec, remuxing will fail. You can specify multiple rules; '
'e.g. "aac>m4a/mov>mp4/mkv" will remux aac to m4a, mov to mp4 and anything else to mkv'))
postproc.add_option(
'--recode-video',
@@ -1668,7 +1677,7 @@ def create_parser():
postproc.add_option(
'--xattrs', '--xattr',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
help='Write metadata to the video file\'s xattrs (using Dublin Core and XDG standards)')
postproc.add_option(
'--concat-playlist',
metavar='POLICY', dest='concat_playlist', default='multi_video',
@@ -1676,7 +1685,7 @@ def create_parser():
help=(
'Concatenate videos in a playlist. One of "never", "always", or '
'"multi_video" (default; only when the videos form a single show). '
'All the video files must have same codecs and number of streams to be concatable. '
'All the video files must have the same codecs and number of streams to be concatenable. '
'The "pl_video:" prefix can be used with "--paths" and "--output" to '
'set the output filename for the concatenated files. See "OUTPUT TEMPLATE" for details'))
postproc.add_option(
@@ -1686,8 +1695,8 @@ def create_parser():
help=(
'Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise), '
'force (try fixing even if file already exists)'))
'detect_or_warn (the default; fix the file if we can, warn otherwise), '
'force (try fixing even if the file already exists)'))
postproc.add_option(
'--prefer-avconv', '--no-prefer-ffmpeg',
action='store_false', dest='prefer_ffmpeg',
@@ -1706,7 +1715,7 @@ def create_parser():
help=(
'Execute a command, optionally prefixed with when to execute it, separated by a ":". '
'Supported values of "WHEN" are the same as that of --use-postprocessor (default: after_move). '
'Same syntax as the output template can be used to pass any field as arguments to the command. '
'The same syntax as the output template can be used to pass any field as arguments to the command. '
'If no fields are passed, %(filepath,_filename|)q is appended to the end of the command. '
'This option can be used multiple times'))
postproc.add_option(
@@ -1777,14 +1786,14 @@ def create_parser():
'delim': None,
'process': lambda val: dict(_postprocessor_opts_parser(*val.split(':', 1))),
}, help=(
'The (case sensitive) name of plugin postprocessors to be enabled, '
'The (case-sensitive) name of plugin postprocessors to be enabled, '
'and (optionally) arguments to be passed to it, separated by a colon ":". '
'ARGS are a semicolon ";" delimited list of NAME=VALUE. '
'The "when" argument determines when the postprocessor is invoked. '
'It can be one of "pre_process" (after video extraction), "after_filter" (after video passes filter), '
'"video" (after --format; before --print/--output), "before_dl" (before each video download), '
'"post_process" (after each video download; default), '
'"after_move" (after moving video file to its final locations), '
'"after_move" (after moving the video file to its final location), '
'"after_video" (after downloading and processing all formats of a video), '
'or "playlist" (at end of playlist). '
'This option can be used multiple times to add different postprocessors'))
@@ -1801,7 +1810,7 @@ def create_parser():
}, help=(
'SponsorBlock categories to create chapters for, separated by commas. '
f'Available categories are {", ".join(SponsorBlockPP.CATEGORIES.keys())}, all and default (=all). '
'You can prefix the category with a "-" to exclude it. See [1] for description of the categories. '
'You can prefix the category with a "-" to exclude it. See [1] for descriptions of the categories. '
'E.g. --sponsorblock-mark all,-preview [1] https://wiki.sponsor.ajay.app/w/Segment_Categories'))
sponsorblock.add_option(
'--sponsorblock-remove', metavar='CATS',
@@ -1887,7 +1896,7 @@ def create_parser():
extractor.add_option(
'--no-hls-split-discontinuity',
dest='hls_split_discontinuity', action='store_false',
help='Do not split HLS playlists to different formats at discontinuities such as ad breaks (default)')
help='Do not split HLS playlists into different formats at discontinuities such as ad breaks (default)')
_extractor_arg_parser = lambda key, vals='': (key.strip().lower().replace('-', '_'), [
val.replace(r'\,', ',').strip() for val in re.split(r'(?<!\\),', vals)])
extractor.add_option(

@@ -15,6 +15,7 @@ from zipfile import ZipFile

from .compat import functools # isort: split
from .utils import (
Config,
get_executable_path,
get_system_config_dirs,
get_user_config_dirs,
@@ -84,6 +85,12 @@ class PluginFinder(importlib.abc.MetaPathFinder):
with contextlib.suppress(ValueError): # Added when running __main__.py directly
candidate_locations.remove(Path(__file__).parent)

# TODO(coletdjnz): remove when plugin globals system is implemented
if Config._plugin_dirs:
candidate_locations.extend(_get_package_paths(
*Config._plugin_dirs,
containing_folder=''))

parts = Path(*fullname.split('.'))
for path in orderedSet(candidate_locations, lazy=True):
candidate = path / parts

@@ -103,7 +103,6 @@ def current_git_head():

_FILE_SUFFIXES = {
'zip': '',
'py2exe': '_min.exe',
'win_exe': '.exe',
'win_x86_exe': '_x86.exe',
'darwin_exe': '_macos',
@@ -117,6 +116,7 @@ _NON_UPDATEABLE_REASONS = {
**{variant: None for variant in _FILE_SUFFIXES}, # Updatable
**{variant: f'Auto-update is not supported for unpackaged {name} executable; Re-download the latest release'
for variant, name in {'win32_dir': 'Windows', 'darwin_dir': 'MacOS', 'linux_dir': 'Linux'}.items()},
'py2exe': 'py2exe is no longer supported by yt-dlp; This executable cannot be updated',
'source': 'You cannot update when running from source code; Use git to pull the latest changes',
'unknown': 'You installed yt-dlp from a manual build or with a package manager; Use that to update',
'other': 'You are using an unofficial build of yt-dlp; Build the executable again',
@@ -152,22 +152,10 @@ def _get_system_deprecation():
variant = detect_variant()

# Temporary until Windows builds use 3.9, which will drop support for Win7 and 2008ServerR2
if variant in ('win_exe', 'win_x86_exe', 'py2exe'):
if variant in ('win_exe', 'win_x86_exe'):
platform_name = platform.platform()
if any(platform_name.startswith(f'Windows-{name}') for name in ('7', '2008ServerR2')):
return EXE_MSG_TMPL.format('Windows 7/Server 2008 R2', 'issues/10086', STOP_MSG)
elif variant == 'py2exe':
return EXE_MSG_TMPL.format(
'py2exe builds (yt-dlp_min.exe)', 'issues/10087',
'In a future update you will be migrated to the PyInstaller-bundled executable. '
'This will be done automatically; no action is required on your part')
return None

# Temporary until aarch64/armv7l build flow is bumped to Ubuntu 20.04 and Python 3.9
elif variant in ('linux_aarch64_exe', 'linux_armv7l_exe'):
libc_ver = version_tuple(os.confstr('CS_GNU_LIBC_VERSION').partition(' ')[2])
if libc_ver < (2, 31):
return EXE_MSG_TMPL.format('system glibc version < 2.31', 'pull/8638', STOP_MSG)
return None

return f'Support for Python version {major}.{minor} has been deprecated. {PYTHON_MSG}'
@@ -362,7 +350,8 @@ class Updater:
continue

self._report_error(
f'yt-dlp cannot be updated to {resolved_tag} since you are on an older Python version', True)
f'yt-dlp cannot be updated to {resolved_tag} since you are on an older Python version '
'or your operating system is not compatible with the requested build', True)
return None

return resolved_tag
@@ -525,7 +514,7 @@ class Updater:
return os.rename(old_filename, self.filename)

variant = detect_variant()
if variant.startswith('win') or variant == 'py2exe':
if variant.startswith('win'):
atexit.register(Popen, f'ping 127.0.0.1 -n 5 -w 1000 & del /F "{old_filename}"',
shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
elif old_filename:

@@ -824,14 +824,18 @@ class Popen(subprocess.Popen):
_startupinfo = None

@staticmethod
def _fix_pyinstaller_ld_path(env):
"""Restore LD_LIBRARY_PATH when using PyInstaller
Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
https://github.com/yt-dlp/yt-dlp/issues/4573
"""
def _fix_pyinstaller_issues(env):
if not hasattr(sys, '_MEIPASS'):
return

# Force spawning independent subprocesses for exes bundled with PyInstaller>=6.10
# Ref: https://pyinstaller.org/en/v6.10.0/CHANGES.html#incompatible-changes
# https://github.com/yt-dlp/yt-dlp/issues/11259
env['PYINSTALLER_RESET_ENVIRONMENT'] = '1'

# Restore LD_LIBRARY_PATH when using PyInstaller
# Ref: https://pyinstaller.org/en/v6.10.0/runtime-information.html#ld-library-path-libpath-considerations
# https://github.com/yt-dlp/yt-dlp/issues/4573
def _fix(key):
orig = env.get(f'{key}_ORIG')
if orig is None:
@@ -845,7 +849,7 @@ class Popen(subprocess.Popen):
def __init__(self, args, *remaining, env=None, text=False, shell=False, **kwargs):
if env is None:
env = os.environ.copy()
self._fix_pyinstaller_ld_path(env)
self._fix_pyinstaller_issues(env)

self.__text_mode = kwargs.get('encoding') or kwargs.get('errors') or text or kwargs.get('universal_newlines')
if text is True:
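Stripped of the Popen wrapper, the two environment fixes above amount to roughly the following sketch (assuming a PyInstaller-frozen build in which the bootloader stores the original value in LD_LIBRARY_PATH_ORIG; the branch after `if orig is None:` is an assumption, since the hunk is truncated at that line):

import os
import sys

env = os.environ.copy()
if hasattr(sys, '_MEIPASS'):  # only applies to PyInstaller-frozen executables
    # Let child processes start with a clean, non-frozen environment on PyInstaller >= 6.10
    env['PYINSTALLER_RESET_ENVIRONMENT'] = '1'
    # Undo the loader-path override the bootloader applied to this process
    orig = env.get('LD_LIBRARY_PATH_ORIG')
    if orig is None:
        env.pop('LD_LIBRARY_PATH', None)
    else:
        env['LD_LIBRARY_PATH'] = orig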
@@ -4893,6 +4897,10 @@ class Config:
filename = None
__initialized = False

# Internal only, do not use! Hack to enable --plugin-dirs
# TODO(coletdjnz): remove when plugin globals system is implemented
_plugin_dirs = None

def __init__(self, parser, label=None):
self.parser, self.label = parser, label
self._loaded_paths, self.configs = set(), []

@@ -55,7 +55,7 @@ def traverse_obj(
The keys in the path can be one of:
- `None`: Return the current object.
- `set`: Requires the only item in the set to be a type or function,
like `{type}`/`{type, type, ...}/`{func}`. If a `type`, return only
like `{type}`/`{type, type, ...}`/`{func}`. If a `type`, return only
values of this type. If a function, returns `func(obj)`.
- `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
- `slice`: Branch out and return all values in `obj[key]`.
@@ -75,7 +75,7 @@ def traverse_obj(

`tuple`, `list`, and `dict` all support nested paths and branches.

@params paths Paths which to traverse by.
@params paths Paths by which to traverse.
@param default Value to return if the paths do not match.
If the last key in the path is a `dict`, it will apply to each value inside
the dict instead, depth first. Try to avoid if using nested `dict` keys.
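A quick illustration of the `set` path key documented above (assuming `traverse_obj` is imported from `yt_dlp.utils`):

import json
from yt_dlp.utils import traverse_obj

data = {'a': {'b': '42'}}
traverse_obj(data, ('a', 'b', {str}))         # -> '42' (kept: the value is a str)
traverse_obj(data, ('a', 'b', {int}))         # -> None (filtered out: '42' is not an int)
traverse_obj(data, ('a', 'b', {json.loads}))  # -> 42 (the function is applied to the value)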

@@ -1,8 +1,8 @@
# Autogenerated by devscripts/update-version.py

__version__ = '2024.10.07'
__version__ = '2024.10.22'

RELEASE_GIT_HEAD = '1a176d874e6772cd898ce507379ea388e96ee3f7'
RELEASE_GIT_HEAD = '67adeb7bab00662ba55d473e405b301abb42fe61'

VARIANT = None

@@ -12,4 +12,4 @@ CHANNEL = 'stable'

ORIGIN = 'yt-dlp/yt-dlp'

_pkg_version = '2024.10.07'
_pkg_version = '2024.10.22'