1
0
mirror of https://github.com/yt-dlp/yt-dlp.git synced 2026-02-06 22:17:17 +00:00

Merge branch 'master' of github-cb:c-basalt/yt-dlp into jsi

This commit is contained in:
c-basalt
2025-04-13 12:46:05 -04:00
105 changed files with 7822 additions and 5741 deletions

View File

@@ -336,6 +336,7 @@ from .canal1 import Canal1IE
from .canalalpha import CanalAlphaIE
from .canalc2 import Canalc2IE
from .canalplus import CanalplusIE
from .canalsurmas import CanalsurmasIE
from .caracoltv import CaracolTvPlayIE
from .cartoonnetwork import CartoonNetworkIE
from .cbc import (
@@ -495,10 +496,6 @@ from .daum import (
from .daystar import DaystarClipIE
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import (
DeezerAlbumIE,
DeezerPlaylistIE,
)
from .democracynow import DemocracynowIE
from .detik import DetikEmbedIE
from .deuxm import (
@@ -686,6 +683,7 @@ from .foxnews import (
)
from .foxsports import FoxSportsIE
from .fptplay import FptplayIE
from .francaisfacile import FrancaisFacileIE
from .franceinter import FranceInterIE
from .francetv import (
FranceTVIE,
@@ -842,6 +840,7 @@ from .icareus import IcareusIE
from .ichinanalive import (
IchinanaLiveClipIE,
IchinanaLiveIE,
IchinanaLiveVODIE,
)
from .idolplus import IdolPlusIE
from .ign import (
@@ -904,6 +903,7 @@ from .ivi import (
IviIE,
)
from .ivideon import IvideonIE
from .ivoox import IvooxIE
from .iwara import (
IwaraIE,
IwaraPlaylistIE,
@@ -961,7 +961,10 @@ from .kick import (
)
from .kicker import KickerIE
from .kickstarter import KickStarterIE
from .kika import KikaIE
from .kika import (
KikaIE,
KikaPlaylistIE,
)
from .kinja import KinjaEmbedIE
from .kinopoisk import KinoPoiskIE
from .kommunetv import KommunetvIE
@@ -1054,6 +1057,7 @@ from .livestream import (
)
from .livestreamfails import LivestreamfailsIE
from .lnk import LnkIE
from .loco import LocoIE
from .loom import (
LoomFolderIE,
LoomIE,
@@ -1061,6 +1065,7 @@ from .loom import (
from .lovehomeporn import LoveHomePornIE
from .lrt import (
LRTVODIE,
LRTRadioIE,
LRTStreamIE,
)
from .lsm import (
@@ -1493,6 +1498,10 @@ from .paramountplus import (
)
from .parler import ParlerIE
from .parlview import ParlviewIE
from .parti import (
PartiLivestreamIE,
PartiVideoIE,
)
from .patreon import (
PatreonCampaignIE,
PatreonIE,
@@ -1739,6 +1748,7 @@ from .roosterteeth import (
RoosterTeethSeriesIE,
)
from .rottentomatoes import RottenTomatoesIE
from .roya import RoyaLiveIE
from .rozhlas import (
MujRozhlasIE,
RozhlasIE,
@@ -1882,6 +1892,8 @@ from .skyit import (
SkyItVideoIE,
SkyItVideoLiveIE,
TV8ItIE,
TV8ItLiveIE,
TV8ItPlaylistIE,
)
from .skylinewebcams import SkylineWebcamsIE
from .skynewsarabia import (
@@ -1985,6 +1997,7 @@ from .storyfire import (
StoryFireSeriesIE,
StoryFireUserIE,
)
from .streaks import StreaksIE
from .streamable import StreamableIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
@@ -2224,6 +2237,7 @@ from .tvplay import (
TVPlayIE,
)
from .tvplayer import TVPlayerIE
from .tvw import TvwIE
from .tweakers import TweakersIE
from .twentymin import TwentyMinutenIE
from .twentythreevideo import TwentyThreeVideoIE
@@ -2347,10 +2361,6 @@ from .viewlift import (
ViewLiftIE,
)
from .viidea import ViideaIE
from .viki import (
VikiChannelIE,
VikiIE,
)
from .vimeo import (
VHXEmbedIE,
VimeoAlbumIE,
@@ -2395,10 +2405,15 @@ from .voxmedia import (
VoxMediaIE,
VoxMediaVolumeIE,
)
from .vrsquare import (
VrSquareChannelIE,
VrSquareIE,
VrSquareSearchIE,
VrSquareSectionIE,
)
from .vrt import (
VRTIE,
DagelijkseKostIE,
KetnetIE,
Radio1BeIE,
VrtNUIE,
)

View File

@@ -1,3 +1,4 @@
import datetime as dt
import functools
from .common import InfoExtractor
@@ -10,7 +11,7 @@ from ..utils import (
filter_dict,
int_or_none,
orderedSet,
unified_timestamp,
parse_iso8601,
url_or_none,
urlencode_postdata,
urljoin,
@@ -87,9 +88,9 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'uploader_id': 'rlantnghks',
'uploader': '페이즈으',
'duration': 10840,
'thumbnail': r're:https?://videoimg\.sooplive\.co/.kr/.+',
'thumbnail': r're:https?://videoimg\.(?:sooplive\.co\.kr|afreecatv\.com)/.+',
'upload_date': '20230108',
'timestamp': 1673218805,
'timestamp': 1673186405,
'title': '젠지 페이즈',
},
'params': {
@@ -102,7 +103,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'id': '20170411_BE689A0E_190960999_1_2_h',
'ext': 'mp4',
'title': '혼자사는여자집',
'thumbnail': r're:https?://(?:video|st)img\.sooplive\.co\.kr/.+',
'thumbnail': r're:https?://(?:video|st)img\.(?:sooplive\.co\.kr|afreecatv\.com)/.+',
'uploader': '♥이슬이',
'uploader_id': 'dasl8121',
'upload_date': '20170411',
@@ -119,7 +120,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'id': '20180327_27901457_202289533_1',
'ext': 'mp4',
'title': '[생]빨개요♥ (part 1)',
'thumbnail': r're:https?://(?:video|st)img\.sooplive\.co\.kr/.+',
'thumbnail': r're:https?://(?:video|st)img\.(?:sooplive\.co\.kr|afreecatv\.com)/.+',
'uploader': '[SA]서아',
'uploader_id': 'bjdyrksu',
'upload_date': '20180327',
@@ -187,7 +188,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'formats': formats,
**traverse_obj(file_element, {
'duration': ('duration', {int_or_none(scale=1000)}),
'timestamp': ('file_start', {unified_timestamp}),
'timestamp': ('file_start', {parse_iso8601(delimiter=' ', timezone=dt.timedelta(hours=9))}),
}),
})
@@ -370,7 +371,7 @@ class AfreecaTVLiveIE(AfreecaTVBaseIE):
'title': channel_info.get('TITLE') or station_info.get('station_title'),
'uploader': channel_info.get('BJNICK') or station_info.get('station_name'),
'uploader_id': broadcaster_id,
'timestamp': unified_timestamp(station_info.get('broad_start')),
'timestamp': parse_iso8601(station_info.get('broad_start'), delimiter=' ', timezone=dt.timedelta(hours=9)),
'formats': formats,
'is_live': True,
'http_headers': {'Referer': url},

View File

@@ -146,7 +146,7 @@ class TokFMPodcastIE(InfoExtractor):
'url': 'https://audycje.tokfm.pl/podcast/91275,-Systemowy-rasizm-Czy-zamieszki-w-USA-po-morderstwie-w-Minneapolis-doprowadza-do-zmian-w-sluzbach-panstwowych',
'info_dict': {
'id': '91275',
'ext': 'aac',
'ext': 'mp3',
'title': 'md5:a9b15488009065556900169fb8061cce',
'episode': 'md5:a9b15488009065556900169fb8061cce',
'series': 'Analizy',
@@ -164,23 +164,20 @@ class TokFMPodcastIE(InfoExtractor):
raise ExtractorError('No such podcast', expected=True)
metadata = metadata[0]
formats = []
for ext in ('aac', 'mp3'):
url_data = self._download_json(
f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}',
media_id, f'Downloading podcast {ext} URL')
# prevents inserting the mp3 (default) multiple times
if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']:
formats.append({
'url': url_data['link_ssl'],
'ext': ext,
'vcodec': 'none',
'acodec': ext,
})
mp3_url = self._download_json(
'https://api.podcast.radioagora.pl/api4/getSongUrl',
media_id, 'Downloading podcast mp3 URL', query={
'podcast_id': media_id,
'device_id': str(uuid.uuid4()),
'ppre': 'false',
'audio': 'mp3',
})['link_ssl']
return {
'id': media_id,
'formats': formats,
'url': mp3_url,
'vcodec': 'none',
'ext': 'mp3',
'title': metadata.get('podcast_name'),
'series': metadata.get('series_name'),
'episode': metadata.get('podcast_name'),

View File

@@ -86,7 +86,7 @@ class BandlabBaseIE(InfoExtractor):
'webpage_url': (
'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any),
'url': ('video', 'url', {url_or_none}),
'title': ('caption', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}),
'title': ('caption', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=72)}),
'description': ('caption', {str}),
'thumbnail': ('video', 'picture', 'url', {url_or_none}),
'view_count': ('video', 'counters', 'plays', {int_or_none}),
@@ -120,7 +120,7 @@ class BandlabIE(BandlabBaseIE):
'duration': 54.629999999999995,
'title': 'sweet black',
'upload_date': '20231210',
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
'genres': ['Lofi'],
'uploader': 'ender milze',
'comment_count': int,
@@ -142,7 +142,7 @@ class BandlabIE(BandlabBaseIE):
'duration': 54.629999999999995,
'title': 'sweet black',
'upload_date': '20231210',
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
'genres': ['Lofi'],
'uploader': 'ender milze',
'comment_count': int,
@@ -158,7 +158,7 @@ class BandlabIE(BandlabBaseIE):
'comment_count': int,
'genres': ['Other'],
'uploader_id': 'user8353034818103753',
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/51b18363-da23-4b9b-a29c-2933a3e561ca/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/51b18363-da23-4b9b-a29c-2933a3e561ca/',
'timestamp': 1709625771,
'track': 'PodcastMaerchen4b',
'duration': 468.14,
@@ -178,7 +178,7 @@ class BandlabIE(BandlabBaseIE):
'id': '110343fc-148b-ea11-96d2-0003ffd1fc09',
'ext': 'm4a',
'timestamp': 1588273294,
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/b612e533-e4f7-4542-9f50-3fcfd8dd822c/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/users/b612e533-e4f7-4542-9f50-3fcfd8dd822c/',
'description': 'Final Revision.',
'title': 'Replay ( Instrumental)',
'uploader': 'David R Sparks',
@@ -200,7 +200,7 @@ class BandlabIE(BandlabBaseIE):
'id': '5cdf9036-3857-ef11-991a-6045bd36e0d9',
'ext': 'mp4',
'duration': 44.705,
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/videos/67c6cef1-cef6-40d3-831e-a55bc1dcb972/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/videos/67c6cef1-cef6-40d3-831e-a55bc1dcb972/',
'comment_count': int,
'title': 'backing vocals',
'uploader_id': 'marliashya',
@@ -224,7 +224,7 @@ class BandlabIE(BandlabBaseIE):
'view_count': int,
'track': 'Positronic Meltdown',
'duration': 318.55,
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/87165bc3-5439-496e-b1f7-a9f13b541ff2/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/songs/87165bc3-5439-496e-b1f7-a9f13b541ff2/',
'description': 'Checkout my tracks at AOMX http://aomxsounds.com/',
'uploader_id': 'microfreaks',
'title': 'Positronic Meltdown',
@@ -246,7 +246,7 @@ class BandlabIE(BandlabBaseIE):
'comment_count': int,
'uploader': 'Sorakime',
'uploader_id': 'sorakime',
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/572a351a-0f3a-4c6a-ac39-1a5defdeeb1c/',
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.0/users/572a351a-0f3a-4c6a-ac39-1a5defdeeb1c/',
'timestamp': 1691162128,
'upload_date': '20230804',
'media_type': 'track',

View File

@@ -1596,16 +1596,16 @@ class BilibiliPlaylistIE(BilibiliSpaceListBaseIE):
webpage = self._download_webpage(url, list_id)
initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', list_id)
if traverse_obj(initial_state, ('error', 'code', {int_or_none})) != 200:
error_code = traverse_obj(initial_state, ('error', 'trueCode', {int_or_none}))
error_message = traverse_obj(initial_state, ('error', 'message', {str_or_none}))
error = traverse_obj(initial_state, (('error', 'listError'), all, lambda _, v: v['code'], any))
if error and error['code'] != 200:
error_code = error.get('trueCode')
if error_code == -400 and list_id == 'watchlater':
self.raise_login_required('You need to login to access your watchlater playlist')
elif error_code == -403:
self.raise_login_required('This is a private playlist. You need to login as its owner')
elif error_code == 11010:
raise ExtractorError('Playlist is no longer available', expected=True)
raise ExtractorError(f'Could not access playlist: {error_code} {error_message}')
raise ExtractorError(f'Could not access playlist: {error_code} {error.get("message")}')
query = {
'ps': 20,

View File

@@ -53,7 +53,7 @@ class BlueskyIE(InfoExtractor):
'channel_id': 'did:plc:z72i7hdynmk6r22z27h6tvur',
'channel_url': 'https://bsky.app/profile/did:plc:z72i7hdynmk6r22z27h6tvur',
'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$',
'title': 'Bluesky now has video! Update your app to versi...',
'title': 'Bluesky now has video! Update your app to version 1.91 or refresh on ...',
'alt_title': 'Bluesky video feature announcement',
'description': r're:(?s)Bluesky now has video! .{239}',
'upload_date': '20240911',
@@ -172,7 +172,7 @@ class BlueskyIE(InfoExtractor):
'channel_id': 'did:plc:z72i7hdynmk6r22z27h6tvur',
'channel_url': 'https://bsky.app/profile/did:plc:z72i7hdynmk6r22z27h6tvur',
'thumbnail': r're:https://video.bsky.app/watch/.*\.jpg$',
'title': 'Bluesky now has video! Update your app to versi...',
'title': 'Bluesky now has video! Update your app to version 1.91 or refresh on ...',
'alt_title': 'Bluesky video feature announcement',
'description': r're:(?s)Bluesky now has video! .{239}',
'upload_date': '20240911',
@@ -191,7 +191,7 @@ class BlueskyIE(InfoExtractor):
'info_dict': {
'id': '3l7rdfxhyds2f',
'ext': 'mp4',
'uploader': 'cinnamon',
'uploader': 'cinnamon 🐇 🏳️‍⚧️',
'uploader_id': 'cinny.bun.how',
'uploader_url': 'https://bsky.app/profile/cinny.bun.how',
'channel_id': 'did:plc:7x6rtuenkuvxq3zsvffp2ide',
@@ -255,7 +255,7 @@ class BlueskyIE(InfoExtractor):
'info_dict': {
'id': '3l77u64l7le2e',
'ext': 'mp4',
'title': 'hearing people on twitter say that bluesky isn\'...',
'title': "hearing people on twitter say that bluesky isn't funny yet so post t...",
'like_count': int,
'uploader_id': 'thafnine.net',
'uploader_url': 'https://bsky.app/profile/thafnine.net',
@@ -387,7 +387,7 @@ class BlueskyIE(InfoExtractor):
'age_limit': (
'labels', ..., 'val', {lambda x: 18 if x in ('sexual', 'porn', 'graphic-media') else None}, any),
'description': (*record_path, 'text', {str}, filter),
'title': (*record_path, 'text', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}),
'title': (*record_path, 'text', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=72)}),
}),
})
return entries

View File

@@ -24,7 +24,7 @@ class BokeCCBaseIE(InfoExtractor):
class BokeCCIE(BokeCCBaseIE):
_IE_DESC = 'CC视频'
IE_DESC = 'CC视频'
_VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
_TESTS = [{

View File

@@ -0,0 +1,84 @@
import json
import time
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
jwt_decode_hs256,
parse_iso8601,
url_or_none,
variadic,
)
from ..utils.traversal import traverse_obj
class CanalsurmasIE(InfoExtractor):
    """Extractor for VOD content from canalsurmas.es (Canal Sur / RTVA)."""

    _VALID_URL = r'https?://(?:www\.)?canalsurmas\.es/videos/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.canalsurmas.es/videos/44006-el-gran-queo-1-lora-del-rio-sevilla-20072014',
        'md5': '861f86fdc1221175e15523047d0087ef',
        'info_dict': {
            'id': '44006',
            'ext': 'mp4',
            'title': 'Lora del Río (Sevilla)',
            'description': 'md5:3d9ee40a9b1b26ed8259e6b71ed27b8b',
            'thumbnail': 'https://cdn2.rtva.interactvty.com/content_cards/00f3e8f67b0a4f3b90a4a14618a48b0d.jpg',
            'timestamp': 1648123182,
            'upload_date': '20220324',
        },
    }]
    # Backend API host used by the website
    _API_BASE = 'https://api-rtva.interactvty.com'
    # Cached JWT access token; fetched lazily and refreshed when near expiry
    _access_token = None

    @staticmethod
    def _is_jwt_expired(token):
        # Treat the token as expired when fewer than 5 minutes of validity remain
        return jwt_decode_hs256(token)['exp'] - time.time() < 300

    def _call_api(self, endpoint, video_id, fields=None):
        """Call the interactvty API, (re)fetching the JWT token first if needed.

        endpoint -- API path segment (e.g. 'content', 'content_resources')
        fields -- optional field name(s) passed via the `optional_fields` query param
        """
        if not self._access_token or self._is_jwt_expired(self._access_token):
            # NOTE(review): hardcoded demo credentials — presumably the same public
            # credentials the website itself uses; confirm against site JS
            self._access_token = self._download_json(
                f'{self._API_BASE}/jwt/token/', None,
                'Downloading access token', 'Failed to download access token',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({
                    'username': 'canalsur_demo',
                    'password': 'dsUBXUcI',
                }).encode())['access']

        return self._download_json(
            f'{self._API_BASE}/api/2.0/contents/{endpoint}/{video_id}/', video_id,
            f'Downloading {endpoint} API JSON', f'Failed to download {endpoint} API JSON',
            headers={'Authorization': f'jwtok {self._access_token}'},
            query={'optional_fields': ','.join(variadic(fields))} if fields else None)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Descriptive metadata for the video
        video_info = self._call_api('content', video_id, fields=[
            'description', 'image', 'duration', 'created_at', 'tags',
        ])
        # Stream resources; each media_url may be an HLS manifest or a direct file
        stream_info = self._call_api('content_resources', video_id, 'media_url')

        formats, subtitles = [], {}
        for stream_url in traverse_obj(stream_info, ('results', ..., 'media_url', {url_or_none})):
            if determine_ext(stream_url) == 'm3u8':
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    stream_url, video_id, m3u8_id='hls', fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            else:
                formats.append({'url': stream_url})

        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(video_info, {
                'title': ('name', {str.strip}),
                'description': ('description', {str}),
                'thumbnail': ('image', {url_or_none}),
                'duration': ('duration', {float_or_none}),
                'timestamp': ('created_at', {parse_iso8601}),
                'tags': ('tags', ..., {str}),
            }),
        }

View File

@@ -121,10 +121,7 @@ class CDAIE(InfoExtractor):
}, **kwargs)
def _perform_login(self, username, password):
app_version = random.choice((
'1.2.88 build 15306',
'1.2.174 build 18469',
))
app_version = '1.2.255 build 21541'
android_version = random.randrange(8, 14)
phone_model = random.choice((
# x-kom.pl top selling Android smartphones, as of 2022-12-26
@@ -190,7 +187,7 @@ class CDAIE(InfoExtractor):
meta = self._download_json(
f'{self._BASE_API_URL}/video/{video_id}', video_id, headers=self._API_HEADERS)['video']
uploader = traverse_obj(meta, 'author', 'login')
uploader = traverse_obj(meta, ('author', 'login', {str}))
formats = [{
'url': quality['file'],

View File

@@ -21,7 +21,7 @@ class CHZZKLiveIE(InfoExtractor):
'channel': '진짜도현',
'channel_id': 'c68b8ef525fb3d2fa146344d84991753',
'channel_is_verified': False,
'thumbnail': r're:^https?://.*\.jpg$',
'thumbnail': r're:https?://.+/.+\.jpg',
'timestamp': 1705510344,
'upload_date': '20240117',
'live_status': 'is_live',
@@ -98,7 +98,7 @@ class CHZZKVideoIE(InfoExtractor):
'channel': '침착맨',
'channel_id': 'bb382c2c0cc9fa7c86ab3b037fb5799c',
'channel_is_verified': False,
'thumbnail': r're:^https?://.*\.jpg$',
'thumbnail': r're:https?://.+/.+\.jpg',
'duration': 15577,
'timestamp': 1702970505.417,
'upload_date': '20231219',
@@ -115,7 +115,7 @@ class CHZZKVideoIE(InfoExtractor):
'channel': '라디유radiyu',
'channel_id': '68f895c59a1043bc5019b5e08c83a5c5',
'channel_is_verified': False,
'thumbnail': r're:^https?://.*\.jpg$',
'thumbnail': r're:https?://.+/.+\.jpg',
'duration': 95,
'timestamp': 1703102631.722,
'upload_date': '20231220',
@@ -131,12 +131,30 @@ class CHZZKVideoIE(InfoExtractor):
'channel': '강지',
'channel_id': 'b5ed5db484d04faf4d150aedd362f34b',
'channel_is_verified': True,
'thumbnail': r're:^https?://.*\.jpg$',
'thumbnail': r're:https?://.+/.+\.jpg',
'duration': 4433,
'timestamp': 1703307460.214,
'upload_date': '20231223',
'view_count': int,
},
}, {
# video_status == 'NONE' but is downloadable
'url': 'https://chzzk.naver.com/video/6325166',
'info_dict': {
'id': '6325166',
'ext': 'mp4',
'title': '와이프 숙제빼주기',
'channel': '이 다',
'channel_id': '0076a519f147ee9fd0959bf02f9571ca',
'channel_is_verified': False,
'view_count': int,
'duration': 28167,
'thumbnail': r're:https?://.+/.+\.jpg',
'timestamp': 1742139216.86,
'upload_date': '20250316',
'live_status': 'was_live',
},
'params': {'skip_download': 'm3u8'},
}]
def _real_extract(self, url):
@@ -147,11 +165,7 @@ class CHZZKVideoIE(InfoExtractor):
live_status = 'was_live' if video_meta.get('liveOpenDate') else 'not_live'
video_status = video_meta.get('vodStatus')
if video_status == 'UPLOAD':
playback = self._parse_json(video_meta['liveRewindPlaybackJson'], video_id)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
playback['media'][0]['path'], video_id, 'mp4', m3u8_id='hls')
elif video_status == 'ABR_HLS':
if video_status == 'ABR_HLS':
formats, subtitles = self._extract_mpd_formats_and_subtitles(
f'https://apis.naver.com/neonplayer/vodplay/v1/playback/{video_meta["videoId"]}',
video_id, query={
@@ -161,10 +175,17 @@ class CHZZKVideoIE(InfoExtractor):
'cpl': 'en_US',
})
else:
self.raise_no_formats(
f'Unknown video status detected: "{video_status}"', expected=True, video_id=video_id)
formats, subtitles = [], {}
live_status = 'post_live' if live_status == 'was_live' else None
fatal = video_status == 'UPLOAD'
playback = self._parse_json(video_meta['liveRewindPlaybackJson'], video_id, fatal=fatal)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
traverse_obj(playback, ('media', 0, 'path')), video_id, 'mp4', m3u8_id='hls', fatal=fatal)
if formats and video_status != 'UPLOAD':
self.write_debug(f'Video found with status: "{video_status}"')
elif not formats:
self.raise_no_formats(
f'Unknown video status detected: "{video_status}"', expected=True, video_id=video_id)
formats, subtitles = [], {}
live_status = 'post_live' if live_status == 'was_live' else None
return {
'id': video_id,

View File

@@ -78,6 +78,7 @@ from ..utils import (
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
qualities,
sanitize_url,
smuggle_url,
str_or_none,
@@ -1569,6 +1570,8 @@ class InfoExtractor:
"""Yield all json ld objects in the html"""
if default is not NO_DEFAULT:
fatal = False
if not fatal and not isinstance(html, str):
return
for mobj in re.finditer(JSON_LD_RE, html):
json_ld_item = self._parse_json(
mobj.group('json_ld'), video_id, fatal=fatal,
@@ -2177,6 +2180,8 @@ class InfoExtractor:
media_url = media.get('URI')
if media_url:
manifest_url = format_url(media_url)
is_audio = media_type == 'AUDIO'
is_alternate = media.get('DEFAULT') == 'NO' or media.get('AUTOSELECT') == 'NO'
formats.extend({
'format_id': join_nonempty(m3u8_id, group_id, name, idx),
'format_note': name,
@@ -2189,7 +2194,11 @@ class InfoExtractor:
'preference': preference,
'quality': quality,
'has_drm': has_drm,
'vcodec': 'none' if media_type == 'AUDIO' else None,
'vcodec': 'none' if is_audio else None,
# Alternate audio formats (e.g. audio description) should be deprioritized
'source_preference': -2 if is_audio and is_alternate else None,
# Save this to assign source_preference based on associated video stream
'_audio_group_id': group_id if is_audio and not is_alternate else None,
} for idx in _extract_m3u8_playlist_indices(manifest_url))
def build_stream_name():
@@ -2284,6 +2293,8 @@ class InfoExtractor:
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
# Save this to determine quality of audio formats that only have a GROUP-ID
f['_audio_group_id'] = audio_group_id
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
@@ -2306,6 +2317,28 @@ class InfoExtractor:
formats.append(http_f)
last_stream_inf = {}
# Some audio-only formats only have a GROUP-ID without any other quality/bitrate/codec info
# Each audio GROUP-ID corresponds with one or more video formats' AUDIO attribute
# For sorting purposes, set source_preference based on the quality of the video formats they are grouped with
# See https://github.com/yt-dlp/yt-dlp/issues/11178
audio_groups_by_quality = orderedSet(f['_audio_group_id'] for f in sorted(
traverse_obj(formats, lambda _, v: v.get('vcodec') != 'none' and v['_audio_group_id']),
key=lambda x: (x.get('tbr') or 0, x.get('width') or 0)))
audio_quality_map = {
audio_groups_by_quality[0]: 'low',
audio_groups_by_quality[-1]: 'high',
} if len(audio_groups_by_quality) > 1 else None
audio_preference = qualities(audio_groups_by_quality)
for fmt in formats:
audio_group_id = fmt.pop('_audio_group_id', None)
if not audio_quality_map or not audio_group_id or fmt.get('vcodec') != 'none':
continue
# Use source_preference since quality and preference are set by params
fmt['source_preference'] = audio_preference(audio_group_id)
fmt['format_note'] = join_nonempty(
fmt.get('format_note'), audio_quality_map.get(audio_group_id), delim=', ')
return formats, subtitles
def _extract_m3u8_vod_duration(
@@ -2935,8 +2968,7 @@ class InfoExtractor:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(
float_or_none(period_duration, segment_duration, default=0)))
representation_ms_info['total_number'] = math.ceil(float_or_none(period_duration, segment_duration, default=0))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,

View File

@@ -5,7 +5,9 @@ from ..utils import (
int_or_none,
try_get,
unified_strdate,
url_or_none,
)
from ..utils.traversal import traverse_obj
class CrowdBunkerIE(InfoExtractor):
@@ -44,16 +46,15 @@ class CrowdBunkerIE(InfoExtractor):
'url': sub_url,
})
mpd_url = try_get(video_json, lambda x: x['dashManifest']['url'])
if mpd_url:
fmts, subs = self._extract_mpd_formats_and_subtitles(mpd_url, video_id)
if mpd_url := traverse_obj(video_json, ('dashManifest', 'url', {url_or_none})):
fmts, subs = self._extract_mpd_formats_and_subtitles(mpd_url, video_id, mpd_id='dash', fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
m3u8_url = try_get(video_json, lambda x: x['hlsManifest']['url'])
if m3u8_url:
fmts, subs = self._extract_m3u8_formats_and_subtitles(mpd_url, video_id)
self._merge_subtitles(subs, target=subtitles)
if m3u8_url := traverse_obj(video_json, ('hlsManifest', 'url', {url_or_none})):
fmts, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, m3u8_id='hls', fatal=False)
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
self._merge_subtitles(subs, target=subtitles)
thumbnails = [{
'url': image['url'],

View File

@@ -1,142 +0,0 @@
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
orderedSet,
)
class DeezerBaseInfoExtractor(InfoExtractor):
    """Shared helpers for the Deezer playlist/album extractors."""

    def get_data(self, url):
        """Download the page and return (data_id, webpage, app-state dict).

        Raises ExtractorError with Deezer's own message when the content is
        geo-blocked.
        """
        if not self.get_param('test'):
            self.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')

        mobj = self._match_valid_url(url)
        data_id = mobj.group('id')

        webpage = self._download_webpage(url, data_id)
        # Geo-blocked pages render a "coming soon" paragraph instead of the player
        geoblocking_msg = self._html_search_regex(
            r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
            default=None)
        if geoblocking_msg is not None:
            raise ExtractorError(
                f'Deezer said: {geoblocking_msg}', expected=True)

        # App state is embedded as JSON in one of two known script forms
        data_json = self._search_regex(
            (r'__DZR_APP_STATE__\s*=\s*({.+?})\s*</script>',
             r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n'),
            webpage, 'data JSON')
        data = json.loads(data_json)
        return data_id, webpage, data
class DeezerPlaylistIE(DeezerBaseInfoExtractor):
    """Extract 30-second preview entries from a Deezer playlist page."""

    _VALID_URL = r'https?://(?:www\.)?deezer\.com/(../)?playlist/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.deezer.com/playlist/176747451',
        'info_dict': {
            'id': '176747451',
            'title': 'Best!',
            'uploader': 'anonymous',
            'thumbnail': r're:^https?://(e-)?cdns-images\.dzcdn\.net/images/cover/.*\.jpg$',
        },
        'playlist_count': 29,
    }

    def _real_extract(self, url):
        playlist_id, webpage, data = self.get_data(url)

        playlist_title = data.get('DATA', {}).get('TITLE')
        playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
        playlist_thumbnail = self._search_regex(
            r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
            'playlist thumbnail')

        entries = []
        # `or []`/`or ()` guard against missing or null SONGS/data/ARTISTS keys,
        # which would otherwise raise TypeError on iteration
        for s in data.get('SONGS', {}).get('data') or []:
            formats = [{
                'format_id': 'preview',
                'url': s.get('MEDIA', [{}])[0].get('HREF'),
                'preference': -100,  # Only the first 30 seconds
                'ext': 'mp3',
            }]
            artists = ', '.join(
                orderedSet(a.get('ART_NAME') for a in s.get('ARTISTS') or ()))
            entries.append({
                'id': s.get('SNG_ID'),
                'duration': int_or_none(s.get('DURATION')),
                'title': '{} - {}'.format(artists, s.get('SNG_TITLE')),
                'uploader': s.get('ART_NAME'),
                'uploader_id': s.get('ART_ID'),
                'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
                'formats': formats,
            })

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_title,
            'uploader': playlist_uploader,
            'thumbnail': playlist_thumbnail,
            'entries': entries,
        }
class DeezerAlbumIE(DeezerBaseInfoExtractor):
    """Extract 30-second preview entries from a Deezer album page."""

    _VALID_URL = r'https?://(?:www\.)?deezer\.com/(../)?album/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.deezer.com/fr/album/67505622',
        'info_dict': {
            'id': '67505622',
            'title': 'Last Week',
            'uploader': 'Home Brew',
            'thumbnail': r're:^https?://(e-)?cdns-images\.dzcdn\.net/images/cover/.*\.jpg$',
        },
        'playlist_count': 7,
    }

    def _real_extract(self, url):
        album_id, webpage, data = self.get_data(url)

        album_title = data.get('DATA', {}).get('ALB_TITLE')
        album_uploader = data.get('DATA', {}).get('ART_NAME')
        album_thumbnail = self._search_regex(
            r'<img id="naboo_album_image".*?src="([^"]+)"', webpage,
            'album thumbnail')

        entries = []
        # `or []`/`or ()` guard against missing or null SONGS/data/ARTISTS keys,
        # which would otherwise raise TypeError on iteration
        for s in data.get('SONGS', {}).get('data') or []:
            formats = [{
                'format_id': 'preview',
                'url': s.get('MEDIA', [{}])[0].get('HREF'),
                'preference': -100,  # Only the first 30 seconds
                'ext': 'mp3',
            }]
            artists = ', '.join(
                orderedSet(a.get('ART_NAME') for a in s.get('ARTISTS') or ()))
            entries.append({
                'id': s.get('SNG_ID'),
                'duration': int_or_none(s.get('DURATION')),
                'title': '{} - {}'.format(artists, s.get('SNG_TITLE')),
                'uploader': s.get('ART_NAME'),
                'uploader_id': s.get('ART_ID'),
                'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
                'formats': formats,
                'track': s.get('SNG_TITLE'),
                'track_number': int_or_none(s.get('TRACK_NUMBER')),
                'track_id': s.get('SNG_ID'),
                'artist': album_uploader,
                'album': album_title,
                'album_artist': album_uploader,
            })

        return {
            '_type': 'playlist',
            'id': album_id,
            'title': album_title,
            'uploader': album_uploader,
            'thumbnail': album_thumbnail,
            'entries': entries,
        }

View File

@@ -0,0 +1,87 @@
import urllib.parse
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
float_or_none,
url_or_none,
)
from ..utils.traversal import traverse_obj
class FrancaisFacileIE(InfoExtractor):
_VALID_URL = r'https?://francaisfacile\.rfi\.fr/[a-z]{2}/(?:actualit%C3%A9|podcasts/[^/#?]+)/(?P<id>[^/#?]+)'
_TESTS = [{
'url': 'https://francaisfacile.rfi.fr/fr/actualit%C3%A9/20250305-r%C3%A9concilier-les-jeunes-avec-la-lecture-gr%C3%A2ce-aux-r%C3%A9seaux-sociaux',
'md5': '4f33674cb205744345cc835991100afa',
'info_dict': {
'id': 'WBMZ58952-FLE-FR-20250305',
'display_id': '20250305-réconcilier-les-jeunes-avec-la-lecture-grâce-aux-réseaux-sociaux',
'title': 'Réconcilier les jeunes avec la lecture grâce aux réseaux sociaux',
'url': 'https://aod-fle.akamaized.net/fle/sounds/fr/2025/03/05/6b6af52a-f9ba-11ef-a1f8-005056a97652.mp3',
'ext': 'mp3',
'description': 'md5:b903c63d8585bd59e8cc4d5f80c4272d',
'duration': 103.15,
'timestamp': 1741177984,
'upload_date': '20250305',
},
}, {
'url': 'https://francaisfacile.rfi.fr/fr/actualit%C3%A9/20250307-argentine-le-sac-d-un-alpiniste-retrouv%C3%A9-40-ans-apr%C3%A8s-sa-mort',
'md5': 'b8c3a63652d4ae8e8092dda5700c1cd9',
'info_dict': {
'id': 'WBMZ59102-FLE-FR-20250307',
'display_id': '20250307-argentine-le-sac-d-un-alpiniste-retrouvé-40-ans-après-sa-mort',
'title': 'Argentine: le sac d\'un alpiniste retrouvé 40 ans après sa mort',
'url': 'https://aod-fle.akamaized.net/fle/sounds/fr/2025/03/07/8edf4082-fb46-11ef-8a37-005056bf762b.mp3',
'ext': 'mp3',
'description': 'md5:7fd088fbdf4a943bb68cf82462160dca',
'duration': 117.74,
'timestamp': 1741352789,
'upload_date': '20250307',
},
}, {
'url': 'https://francaisfacile.rfi.fr/fr/podcasts/un-mot-une-histoire/20250317-le-mot-de-david-foenkinos-peut-%C3%AAtre',
'md5': 'db83c2cc2589b4c24571c6b6cf14f5f1',
'info_dict': {
'id': 'WBMZ59441-FLE-FR-20250317',
'display_id': '20250317-le-mot-de-david-foenkinos-peut-être',
'title': 'Le mot de David Foenkinos: «peut-être» - Un mot, une histoire',
'url': 'https://aod-fle.akamaized.net/fle/sounds/fr/2025/03/17/4ca6cbbe-0315-11f0-a85b-005056a97652.mp3',
'ext': 'mp3',
'description': 'md5:3fe35fae035803df696bfa7af2496e49',
'duration': 198.96,
'timestamp': 1742210897,
'upload_date': '20250317',
},
}]
def _real_extract(self, url):
    """Extract a single audio episode page from francaisfacile.rfi.fr."""
    # The URL slug is percent-encoded UTF-8; decode it for a readable display_id
    display_id = urllib.parse.unquote(self._match_id(url))
    try: # yt-dlp's default user-agents are too old and blocked by the site
        webpage = self._download_webpage(url, display_id, headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:136.0) Gecko/20100101 Firefox/136.0',
        })
    except ExtractorError as e:
        # Only fall through to impersonation on HTTP 403; anything else is re-raised
        if not isinstance(e.cause, HTTPError) or e.cause.status != 403:
            raise
        # Retry with impersonation if hardcoded UA is insufficient
        webpage = self._download_webpage(url, display_id, impersonate=True)
    # Episode metadata is embedded as JSON inside a <script data-media-id=...> tag
    data = self._search_json(
        r'<script[^>]+\bdata-media-id=[^>]+\btype="application/json"[^>]*>',
        webpage, 'audio data', display_id)
    # NOTE: ordering matters below — JSON-LD values are overridden by the more
    # precise fields from the embedded player JSON (title/url/duration)
    return {
        'id': data['mediaId'],
        'display_id': display_id,
        'vcodec': 'none',
        'title': self._html_extract_title(webpage),
        **self._search_json_ld(webpage, display_id, fatal=False),
        **traverse_obj(data, {
            'title': ('title', {str}),
            'url': ('sources', ..., 'url', {url_or_none}, any),
            'duration': ('sources', ..., 'duration', {float_or_none}, any),
        }),
    }

View File

@@ -16,6 +16,7 @@ from ..utils import (
MEDIA_EXTENSIONS,
ExtractorError,
UnsupportedError,
base_url,
determine_ext,
determine_protocol,
dict_get,
@@ -2213,10 +2214,21 @@ class GenericIE(InfoExtractor):
if is_live is not None:
info['live_status'] = 'not_live' if is_live == 'false' else 'is_live'
return
headers = m3u8_format.get('http_headers') or info.get('http_headers')
duration = self._extract_m3u8_vod_duration(
m3u8_format['url'], info.get('id'), note='Checking m3u8 live status',
errnote='Failed to download m3u8 media playlist', headers=headers)
headers = m3u8_format.get('http_headers') or info.get('http_headers') or {}
display_id = info.get('id')
urlh = self._request_webpage(
m3u8_format['url'], display_id, 'Checking m3u8 live status', errnote=False,
headers={**headers, 'Accept-Encoding': 'identity'}, fatal=False)
if urlh is False:
return
first_bytes = urlh.read(512)
if not first_bytes.startswith(b'#EXTM3U'):
return
m3u8_doc = self._webpage_read_content(
urlh, urlh.url, display_id, prefix=first_bytes, fatal=False, errnote=False)
if not m3u8_doc:
return
duration = self._parse_m3u8_vod_duration(m3u8_doc, display_id)
if not duration:
info['live_status'] = 'is_live'
info['duration'] = info.get('duration') or duration
@@ -2531,7 +2543,7 @@ class GenericIE(InfoExtractor):
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'], info_dict['subtitles'] = self._parse_mpd_formats_and_subtitles(
doc,
mpd_base_url=full_response.url.rpartition('/')[0],
mpd_base_url=base_url(full_response.url),
mpd_url=url)
info_dict['live_status'] = 'is_live' if doc.get('type') == 'dynamic' else None
self._extra_manifest_info(info_dict, url)

View File

@@ -1,19 +0,0 @@
from .common import InfoExtractor
from ..utils import (
ExtractorError,
urlencode_postdata,
)
class GigyaBaseIE(InfoExtractor):
    """Mixin providing Gigya (SAP Customer Data Cloud) account login."""

    def _gigya_login(self, auth_data):
        """POST *auth_data* to the Gigya login endpoint and return its JSON reply.

        Raises an expected ExtractorError when the reply carries an error
        message (either 'errorDetails' or, failing that, 'errorMessage').
        """
        response = self._download_json(
            'https://accounts.eu1.gigya.com/accounts.login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(auth_data))
        for key in ('errorDetails', 'errorMessage'):
            message = response.get(key)
            if message:
                raise ExtractorError(
                    f'Unable to login: {message}', expected=True)
        return response

View File

@@ -6,7 +6,7 @@ from ..utils import (
)
class HSEShowBaseInfoExtractor(InfoExtractor):
class HSEShowBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['DE']
def _extract_redux_data(self, url, video_id):
@@ -28,7 +28,7 @@ class HSEShowBaseInfoExtractor(InfoExtractor):
return formats, subtitles
class HSEShowIE(HSEShowBaseInfoExtractor):
class HSEShowIE(HSEShowBaseIE):
_VALID_URL = r'https?://(?:www\.)?hse\.de/dpl/c/tv-shows/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.hse.de/dpl/c/tv-shows/505350',
@@ -64,7 +64,7 @@ class HSEShowIE(HSEShowBaseInfoExtractor):
}
class HSEProductIE(HSEShowBaseInfoExtractor):
class HSEProductIE(HSEShowBaseIE):
_VALID_URL = r'https?://(?:www\.)?hse\.de/dpl/p/product/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.hse.de/dpl/p/product/408630',

View File

@@ -1,5 +1,13 @@
from .common import InfoExtractor
from ..utils import ExtractorError, str_or_none, traverse_obj, unified_strdate
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
traverse_obj,
unified_strdate,
url_or_none,
)
class IchinanaLiveIE(InfoExtractor):
@@ -157,3 +165,51 @@ class IchinanaLiveClipIE(InfoExtractor):
'description': view_data.get('caption'),
'upload_date': unified_strdate(str_or_none(view_data.get('createdAt'))),
}
class IchinanaLiveVODIE(InfoExtractor):
    """Extractor for archived broadcasts (VODs) on 17LIVE (17.live)."""
    IE_NAME = '17live:vod'
    _VALID_URL = r'https?://(?:www\.)?17\.live/ja/vod/[^/?#]+/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://17.live/ja/vod/27323042/2cf84520-e65e-4b22-891e-1d3a00b0f068',
        'md5': '3299b930d7457b069639486998a89580',
        'info_dict': {
            'id': '2cf84520-e65e-4b22-891e-1d3a00b0f068',
            'ext': 'mp4',
            'title': 'md5:b5f8cbf497d54cc6a60eb3b480182f01',
            'uploader': 'md5:29fb12122ab94b5a8495586e7c3085a5',
            'uploader_id': '27323042',
            'channel': '🌟オールナイトニッポン アーカイブ🌟',
            'channel_id': '2b4f85f1-d61e-429d-a901-68d32bdd8645',
            'like_count': int,
            'view_count': int,
            'thumbnail': r're:https?://.+/.+\.(?:jpe?g|png)',
            'duration': 549,
            'description': 'md5:116f326579700f00eaaf5581aae1192e',
            'timestamp': 1741058645,
            'upload_date': '20250304',
        },
    }, {
        'url': 'https://17.live/ja/vod/27323042/0de11bac-9bea-40b8-9eab-0239a7d88079',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Public VOD metadata endpoint keyed by the UUID from the URL
        json_data = self._download_json(f'https://wap-api.17app.co/api/v1/vods/{video_id}', video_id)
        return traverse_obj(json_data, {
            'id': ('vodID', {str}),
            'title': ('title', {str}),
            # vodURL is an HLS manifest; expand it into individual formats
            'formats': ('vodURL', {lambda x: self._extract_m3u8_formats(x, video_id)}),
            'uploader': ('userInfo', 'displayName', {str}),
            # roomID is numeric in the API; coerce via int, then stringify
            'uploader_id': ('userInfo', 'roomID', {int}, {str_or_none}),
            'channel': ('userInfo', 'name', {str}),
            'channel_id': ('userInfo', 'userID', {str}),
            'like_count': ('likeCount', {int_or_none}),
            'view_count': ('viewCount', {int_or_none}),
            'thumbnail': ('imageURL', {url_or_none}),
            'duration': ('duration', {int_or_none}),
            'description': ('description', {str}),
            # createdAt appears to be epoch seconds (cf. test fixture) — TODO confirm
            'timestamp': ('createdAt', {int_or_none}),
        })

78
yt_dlp/extractor/ivoox.py Normal file
View File

@@ -0,0 +1,78 @@
from .common import InfoExtractor
from ..utils import int_or_none, parse_iso8601, url_or_none, urljoin
from ..utils.traversal import traverse_obj
class IvooxIE(InfoExtractor):
    """Extractor for podcast episodes on ivoox.com (and go.ivoox.com short links)."""
    _VALID_URL = (
        r'https?://(?:www\.)?ivoox\.com/(?:\w{2}/)?[^/?#]+_rf_(?P<id>[0-9]+)_1\.html',
        r'https?://go\.ivoox\.com/rf/(?P<id>[0-9]+)',
    )
    _TESTS = [{
        'url': 'https://www.ivoox.com/dex-08x30-rostros-del-mal-los-asesinos-en-audios-mp3_rf_143594959_1.html',
        'md5': '993f712de5b7d552459fc66aa3726885',
        'info_dict': {
            'id': '143594959',
            'ext': 'mp3',
            'timestamp': 1742731200,
            'channel': 'DIAS EXTRAÑOS con Santiago Camacho',
            'title': 'DEx 08x30 Rostros del mal: Los asesinos en serie que aterrorizaron España',
            'description': 'md5:eae8b4b9740d0216d3871390b056bb08',
            'uploader': 'Santiago Camacho',
            'thumbnail': 'https://static-1.ivoox.com/audios/c/d/5/2/cd52f46783fe735000c33a803dce2554_XXL.jpg',
            'upload_date': '20250323',
            'episode': 'DEx 08x30 Rostros del mal: Los asesinos en serie que aterrorizaron España',
            'duration': 11837,
            'tags': ['españa', 'asesinos en serie', 'arropiero', 'historia criminal', 'mataviejas'],
        },
    }, {
        'url': 'https://go.ivoox.com/rf/143594959',
        'only_matching': True,
    }, {
        'url': 'https://www.ivoox.com/en/campodelgas-28-03-2025-audios-mp3_rf_144036942_1.html',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        media_id = self._match_id(url)
        # NOTE(review): with fatal=False a failed download leaves webpage falsy,
        # yet the regex/Nuxt searches below assume a string — confirm this is safe
        webpage = self._download_webpage(url, media_id, fatal=False)
        data = self._search_nuxt_data(
            webpage, media_id, fatal=False, traverse=('data', 0, 'data', 'audio'))
        # Separate API call for the direct download URL; best-effort only
        direct_download = self._download_json(
            f'https://vcore-web.ivoox.com/v1/public/audios/{media_id}/download-url', media_id, fatal=False,
            note='Fetching direct download link', headers={'Referer': url})
        # Deduplicate candidate paths from both the API and the page's Nuxt data
        download_paths = {
            *traverse_obj(direct_download, ('data', 'downloadUrl', {str}, filter, all)),
            *traverse_obj(data, (('downloadUrl', 'mediaUrl'), {str}, filter)),
        }
        formats = []
        for path in download_paths:
            formats.append({
                'url': urljoin('https://ivoox.com', path),
                # The CDN requires a Referer header from the episode page
                'http_headers': {'Referer': url},
            })
        # Later entries override earlier ones: data-prm-* attributes and OpenGraph
        # tags are refined by JSON-LD, then by the Nuxt payload
        return {
            'id': media_id,
            'formats': formats,
            'uploader': self._html_search_regex(r'data-prm-author="([^"]+)"', webpage, 'author', default=None),
            'timestamp': parse_iso8601(
                self._html_search_regex(r'data-prm-pubdate="([^"]+)"', webpage, 'timestamp', default=None)),
            'channel': self._html_search_regex(r'data-prm-podname="([^"]+)"', webpage, 'channel', default=None),
            'title': self._html_search_regex(r'data-prm-title="([^"]+)"', webpage, 'title', default=None),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'description': self._og_search_description(webpage, default=None),
            **self._search_json_ld(webpage, media_id, default={}),
            **traverse_obj(data, {
                'title': ('title', {str}),
                'description': ('description', {str}),
                'thumbnail': ('image', {url_or_none}),
                # uploadDate uses a space (not 'T') between date and time
                'timestamp': ('uploadDate', {parse_iso8601(delimiter=' ')}),
                'duration': ('duration', {int_or_none}),
                'tags': ('tags', ..., 'name', {str}),
            }),
        }

View File

@@ -2,10 +2,12 @@ import hashlib
import random
from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
clean_html,
int_or_none,
try_get,
urlhandle_detect_ext,
)
@@ -27,7 +29,7 @@ class JamendoIE(InfoExtractor):
'ext': 'flac',
# 'title': 'Maya Filipič - Stories from Emona I',
'title': 'Stories from Emona I',
'artist': 'Maya Filipič',
'artists': ['Maya Filipič'],
'album': 'Between two worlds',
'track': 'Stories from Emona I',
'duration': 210,
@@ -93,9 +95,15 @@ class JamendoIE(InfoExtractor):
if not cover_url or cover_url in urls:
continue
urls.append(cover_url)
urlh = self._request_webpage(
HEADRequest(cover_url), track_id, 'Checking thumbnail extension',
errnote=False, fatal=False)
if not urlh:
continue
size = int_or_none(cover_id.lstrip('size'))
thumbnails.append({
'id': cover_id,
'ext': urlhandle_detect_ext(urlh, default='jpg'),
'url': cover_url,
'width': size,
'height': size,

View File

@@ -1,3 +1,5 @@
import itertools
from .common import InfoExtractor
from ..utils import (
determine_ext,
@@ -124,3 +126,43 @@ class KikaIE(InfoExtractor):
'vbr': ('bitrateVideo', {int_or_none}, {lambda x: None if x == -1 else x}),
}),
}
class KikaPlaylistIE(InfoExtractor):
    """Playlist extractor for KiKA brand pages (all videos of a show)."""

    _VALID_URL = r'https?://(?:www\.)?kika\.de/[\w-]+/(?P<id>[a-z-]+\d+)'
    _TESTS = [{
        'url': 'https://www.kika.de/logo/logo-die-welt-und-ich-562',
        'info_dict': {
            'id': 'logo-die-welt-und-ich-562',
            'title': 'logo!',
            'description': 'md5:7b9d7f65561b82fa512f2cfb553c397d',
        },
        'playlist_count': 100,
    }]

    def _entries(self, next_url, playlist_id):
        """Yield url_result entries, following the paginated 'links.next' chain."""
        page = 0
        while next_url:
            page += 1
            data = self._download_json(next_url, playlist_id, note=f'Downloading page {page}')
            # Only items that carry a valid API URL can be delegated to KikaIE
            for entry in traverse_obj(data, ('content', lambda _, v: url_or_none(v['api']['url']))):
                metadata = traverse_obj(entry, {
                    'id': ('id', {str}),
                    'title': ('title', {str}),
                    'duration': ('duration', {int_or_none}),
                    'timestamp': ('date', {parse_iso8601}),
                })
                yield self.url_result(entry['api']['url'], ie=KikaIE, **metadata)
            next_url = traverse_obj(data, ('links', 'next', {url_or_none}))

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        # Brand metadata holds the playlist title/description and the first page URL
        brand = self._download_json(
            f'https://www.kika.de/_next-api/proxy/v1/brands/{playlist_id}', playlist_id)
        return self.playlist_result(
            self._entries(brand['videoSubchannel']['videosPageUrl'], playlist_id),
            playlist_id, title=brand.get('title'), description=brand.get('description'))

87
yt_dlp/extractor/loco.py Normal file
View File

@@ -0,0 +1,87 @@
from .common import InfoExtractor
from ..utils import int_or_none, url_or_none
from ..utils.traversal import require, traverse_obj
class LocoIE(InfoExtractor):
    """Extractor for loco.com livestreams (/streamers/...) and VODs (/stream/...)."""
    _VALID_URL = r'https?://(?:www\.)?loco\.com/(?P<type>streamers|stream)/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://loco.com/streamers/teuzinfps',
        'info_dict': {
            'id': 'teuzinfps',
            'ext': 'mp4',
            'title': r're:MS BOLADAO, RESENHA & GAMEPLAY ALTO NIVEL',
            'description': 'bom e novo',
            'uploader_id': 'RLUVE3S9JU',
            'channel': 'teuzinfps',
            'channel_follower_count': int,
            'comment_count': int,
            'view_count': int,
            'concurrent_view_count': int,
            'like_count': int,
            'thumbnail': 'https://static.ivory.getloconow.com/default_thumb/743701a9-98ca-41ae-9a8b-70bd5da070ad.jpg',
            'tags': ['MMORPG', 'Gameplay'],
            'series': 'Tibia',
            'timestamp': int,
            'modified_timestamp': int,
            'live_status': 'is_live',
            'upload_date': str,
            'modified_date': str,
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }, {
        'url': 'https://loco.com/stream/c64916eb-10fb-46a9-9a19-8c4b7ed064e7',
        'md5': '45ebc8a47ee1c2240178757caf8881b5',
        'info_dict': {
            'id': 'c64916eb-10fb-46a9-9a19-8c4b7ed064e7',
            'ext': 'mp4',
            'title': 'PAULINHO LOKO NA LOCO!',
            'description': 'live on na loco',
            'uploader_id': '2MDO7Z1DPM',
            'channel': 'paulinholokobr',
            'channel_follower_count': int,
            'comment_count': int,
            'view_count': int,
            'concurrent_view_count': int,
            'like_count': int,
            'duration': 14491,
            'thumbnail': 'https://static.ivory.getloconow.com/default_thumb/59b5970b-23c1-4518-9e96-17ce341299fe.jpg',
            'tags': ['Gameplay'],
            'series': 'GTA 5',
            'timestamp': 1740612872,
            'modified_timestamp': 1740613037,
            'upload_date': '20250226',
            'modified_date': '20250226',
        },
    }]
    def _real_extract(self, url):
        video_type, video_id = self._match_valid_url(url).group('type', 'id')
        webpage = self._download_webpage(url, video_id)
        # Live pages use 'liveStreamData', VOD pages use 'stream'; require()
        # aborts with a clear error when neither is present
        stream = traverse_obj(self._search_nextjs_data(webpage, video_id), (
            'props', 'pageProps', ('liveStreamData', 'stream'), {dict}, any, {require('stream info')}))
        return {
            'formats': self._extract_m3u8_formats(stream['conf']['hls'], video_id),
            'id': video_id,
            # '/streamers/' URLs are channel livestream pages; '/stream/' is a VOD
            'is_live': video_type == 'streamers',
            **traverse_obj(stream, {
                'title': ('title', {str}),
                'series': ('game_name', {str}),
                'uploader_id': ('user_uid', {str}),
                'channel': ('alias', {str}),
                'description': ('description', {str}),
                'concurrent_view_count': ('viewersCurrent', {int_or_none}),
                'view_count': ('total_views', {int_or_none}),
                'thumbnail': ('thumbnail_url_small', {url_or_none}),
                'like_count': ('likes', {int_or_none}),
                'tags': ('tags', ..., {str}),
                # started_at/updated_at are in milliseconds; scale to seconds
                'timestamp': ('started_at', {int_or_none(scale=1000)}),
                'modified_timestamp': ('updated_at', {int_or_none(scale=1000)}),
                'comment_count': ('comments_count', {int_or_none}),
                'channel_follower_count': ('followers_count', {int_or_none}),
                'duration': ('duration', {int_or_none}),
            }),
        }

View File

@@ -2,8 +2,11 @@ from .common import InfoExtractor
from ..utils import (
clean_html,
merge_dicts,
str_or_none,
traverse_obj,
unified_timestamp,
url_or_none,
urljoin,
)
@@ -80,7 +83,7 @@ class LRTVODIE(LRTBaseIE):
}]
def _real_extract(self, url):
path, video_id = self._match_valid_url(url).groups()
path, video_id = self._match_valid_url(url).group('path', 'id')
webpage = self._download_webpage(url, video_id)
media_url = self._extract_js_var(webpage, 'main_url', path)
@@ -106,3 +109,42 @@ class LRTVODIE(LRTBaseIE):
}
return merge_dicts(clean_info, jw_data, json_ld_data)
class LRTRadioIE(LRTBaseIE):
    """Extractor for radio recordings from LRT Radioteka (lrt.lt/radioteka)."""
    _VALID_URL = r'https?://(?:www\.)?lrt\.lt/radioteka/irasas/(?P<id>\d+)/(?P<path>[^?#/]+)'
    _TESTS = [{
        # m3u8 download
        'url': 'https://www.lrt.lt/radioteka/irasas/2000359728/nemarios-eiles-apie-pragarus-ir-skaistyklas-su-aiste-kiltinaviciute',
        'info_dict': {
            'id': '2000359728',
            'ext': 'm4a',
            'title': 'Nemarios eilės: apie pragarus ir skaistyklas su Aiste Kiltinavičiūte',
            'description': 'md5:5eee9a0e86a55bf547bd67596204625d',
            'timestamp': 1726143120,
            'upload_date': '20240912',
            'tags': 'count:5',
            'thumbnail': r're:https?://.+/.+\.jpe?g',
            'categories': ['Daiktiniai įrodymai'],
        },
    }, {
        'url': 'https://www.lrt.lt/radioteka/irasas/2000304654/vakaras-su-knyga-svetlana-aleksijevic-cernobylio-malda-v-dalis?season=%2Fmediateka%2Faudio%2Fvakaras-su-knyga%2F2023',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id, path = self._match_valid_url(url).group('id', 'path')
        # The media API takes the original page path as its 'url' query parameter
        media = self._download_json(
            'https://www.lrt.lt/radioteka/api/media', video_id,
            query={'url': f'/mediateka/irasas/{video_id}/{path}'})
        return traverse_obj(media, {
            # API id is numeric; coerce via int, then stringify
            'id': ('id', {int}, {str_or_none}),
            'title': ('title', {str}),
            'tags': ('tags', ..., 'name', {str}),
            # filter-all-filter: drop empty values, then drop the list if empty
            'categories': ('playlist_item', 'category', {str}, filter, all, filter),
            'description': ('content', {clean_html}, {str}),
            # 'date' uses dots as separators; swap to '/' so unified_timestamp parses it
            'timestamp': ('date', {lambda x: x.replace('.', '/')}, {unified_timestamp}),
            'thumbnail': ('playlist_item', 'image', {urljoin('https://www.lrt.lt')}),
            'formats': ('playlist_item', 'file', {lambda x: self._extract_m3u8_formats(x, video_id)}),
        })

View File

@@ -1,35 +1,36 @@
from .common import InfoExtractor
from ..utils import parse_age_limit, parse_duration, traverse_obj
from ..utils import parse_age_limit, parse_duration, url_or_none
from ..utils.traversal import traverse_obj
class MagellanTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?magellantv\.com/(?:watch|video)/(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://www.magellantv.com/watch/my-dads-on-death-row?type=v',
'url': 'https://www.magellantv.com/watch/incas-the-new-story?type=v',
'info_dict': {
'id': 'my-dads-on-death-row',
'id': 'incas-the-new-story',
'ext': 'mp4',
'title': 'My Dad\'s On Death Row',
'description': 'md5:33ba23b9f0651fc4537ed19b1d5b0d7a',
'duration': 3780.0,
'title': 'Incas: The New Story',
'description': 'md5:936c7f6d711c02dfb9db22a067b586fe',
'age_limit': 14,
'tags': ['Justice', 'Reality', 'United States', 'True Crime'],
'duration': 3060.0,
'tags': ['Ancient History', 'Archaeology', 'Anthropology'],
},
'params': {'skip_download': 'm3u8'},
}, {
'url': 'https://www.magellantv.com/video/james-bulger-the-new-revelations',
'url': 'https://www.magellantv.com/video/tortured-to-death-murdering-the-nanny',
'info_dict': {
'id': 'james-bulger-the-new-revelations',
'id': 'tortured-to-death-murdering-the-nanny',
'ext': 'mp4',
'title': 'James Bulger: The New Revelations',
'description': 'md5:7b97922038bad1d0fe8d0470d8a189f2',
'title': 'Tortured to Death: Murdering the Nanny',
'description': 'md5:d87033594fa218af2b1a8b49f52511e5',
'age_limit': 14,
'duration': 2640.0,
'age_limit': 0,
'tags': ['Investigation', 'True Crime', 'Justice', 'Europe'],
'tags': ['True Crime', 'Murder'],
},
'params': {'skip_download': 'm3u8'},
}, {
'url': 'https://www.magellantv.com/watch/celebration-nation',
'url': 'https://www.magellantv.com/watch/celebration-nation?type=s',
'info_dict': {
'id': 'celebration-nation',
'ext': 'mp4',
@@ -43,10 +44,19 @@ class MagellanTVIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data = traverse_obj(self._search_nextjs_data(webpage, video_id), (
'props', 'pageProps', 'reactContext',
(('video', 'detail'), ('series', 'currentEpisode')), {dict}), get_all=False)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(data['jwpVideoUrl'], video_id)
context = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['reactContext']
data = traverse_obj(context, ((('video', 'detail'), ('series', 'currentEpisode')), {dict}, any))
formats, subtitles = [], {}
for m3u8_url in set(traverse_obj(data, ((('manifests', ..., 'hls'), 'jwp_video_url'), {url_or_none}))):
fmts, subs = self._extract_m3u8_formats_and_subtitles(
m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
if not formats and (error := traverse_obj(context, ('errorDetailPage', 'errorMessage', {str}))):
if 'available in your country' in error:
self.raise_geo_restricted(msg=error)
self.raise_no_formats(f'{self.IE_NAME} said: {error}', expected=True)
return {
'id': video_id,

View File

@@ -102,11 +102,10 @@ class MedalTVIE(InfoExtractor):
item_id = item_id or '%dp' % height
if item_id not in item_url:
return
width = int(round(aspect_ratio * height))
container.append({
'url': item_url,
id_key: item_id,
'width': width,
'width': round(aspect_ratio * height),
'height': height,
})

View File

@@ -4,6 +4,7 @@ from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
parse_resolution,
traverse_obj,
unified_timestamp,
url_basename,
@@ -83,8 +84,8 @@ class MicrosoftMediusBaseIE(InfoExtractor):
subtitles.setdefault(sub.pop('tag', 'und'), []).append(sub)
return subtitles
def _extract_ism(self, ism_url, video_id):
formats = self._extract_ism_formats(ism_url, video_id)
def _extract_ism(self, ism_url, video_id, fatal=True):
formats = self._extract_ism_formats(ism_url, video_id, fatal=fatal)
for fmt in formats:
if fmt['language'] != 'eng' and 'English' not in fmt['format_id']:
fmt['language_preference'] = -10
@@ -218,9 +219,21 @@ class MicrosoftLearnEpisodeIE(MicrosoftMediusBaseIE):
'description': 'md5:7bbbfb593d21c2cf2babc3715ade6b88',
'timestamp': 1676339547,
'upload_date': '20230214',
'thumbnail': r're:https://learn\.microsoft\.com/video/media/.*\.png',
'thumbnail': r're:https://learn\.microsoft\.com/video/media/.+\.png',
'subtitles': 'count:14',
},
}, {
'url': 'https://learn.microsoft.com/en-gb/shows/on-demand-instructor-led-training-series/az-900-module-1',
'info_dict': {
'id': '4fe10f7c-d83c-463b-ac0e-c30a8195e01b',
'ext': 'mp4',
'title': 'AZ-900 Cloud fundamentals (1 of 6)',
'description': 'md5:3c2212ce865e9142f402c766441bd5c9',
'thumbnail': r're:https://.+/.+\.jpg',
'timestamp': 1706605184,
'upload_date': '20240130',
},
'params': {'format': 'bv[protocol=https]'},
}]
def _real_extract(self, url):
@@ -230,9 +243,32 @@ class MicrosoftLearnEpisodeIE(MicrosoftMediusBaseIE):
entry_id = self._html_search_meta('entryId', webpage, 'entryId', fatal=True)
video_info = self._download_json(
f'https://learn.microsoft.com/api/video/public/v1/entries/{entry_id}', video_id)
formats = []
if ism_url := traverse_obj(video_info, ('publicVideo', 'adaptiveVideoUrl', {url_or_none})):
formats.extend(self._extract_ism(ism_url, video_id, fatal=False))
if hls_url := traverse_obj(video_info, ('publicVideo', 'adaptiveVideoHLSUrl', {url_or_none})):
formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
if mpd_url := traverse_obj(video_info, ('publicVideo', 'adaptiveVideoDashUrl', {url_or_none})):
formats.extend(self._extract_mpd_formats(mpd_url, video_id, mpd_id='dash', fatal=False))
for key in ('low', 'medium', 'high'):
if video_url := traverse_obj(video_info, ('publicVideo', f'{key}QualityVideoUrl', {url_or_none})):
formats.append({
'url': video_url,
'format_id': f'video-http-{key}',
'acodec': 'none',
**parse_resolution(video_url),
})
if audio_url := traverse_obj(video_info, ('publicVideo', 'audioUrl', {url_or_none})):
formats.append({
'url': audio_url,
'format_id': 'audio-http',
'vcodec': 'none',
})
return {
'id': entry_id,
'formats': self._extract_ism(video_info['publicVideo']['adaptiveVideoUrl'], video_id),
'formats': formats,
'subtitles': self._sub_to_dict(traverse_obj(video_info, (
'publicVideo', 'captions', lambda _, v: url_or_none(v['url']), {
'tag': ('language', {str}),

View File

@@ -1,5 +1,7 @@
from .telecinco import TelecincoBaseIE
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
)
@@ -79,7 +81,17 @@ class MiTeleIE(TelecincoBaseIE):
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
try: # yt-dlp's default user-agents are too old and blocked by akamai
webpage = self._download_webpage(url, display_id, headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:136.0) Gecko/20100101 Firefox/136.0',
})
except ExtractorError as e:
if not isinstance(e.cause, HTTPError) or e.cause.status != 403:
raise
# Retry with impersonation if hardcoded UA is insufficient to bypass akamai
webpage = self._download_webpage(url, display_id, impersonate=True)
pre_player = self._search_json(
r'window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=',
webpage, 'Pre Player', display_id)['prePlayer']

View File

@@ -10,7 +10,9 @@ from ..utils import (
parse_iso8601,
strip_or_none,
try_get,
url_or_none,
)
from ..utils.traversal import traverse_obj
class MixcloudBaseIE(InfoExtractor):
@@ -37,7 +39,7 @@ class MixcloudIE(MixcloudBaseIE):
'ext': 'm4a',
'title': 'Cryptkeeper',
'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
'uploader': 'Daniel Holbach',
'uploader': 'dholbach',
'uploader_id': 'dholbach',
'thumbnail': r're:https?://.*\.jpg',
'view_count': int,
@@ -46,10 +48,11 @@ class MixcloudIE(MixcloudBaseIE):
'uploader_url': 'https://www.mixcloud.com/dholbach/',
'artist': 'Submorphics & Chino , Telekinesis, Porter Robinson, Enei, Breakage ft Jess Mills',
'duration': 3723,
'tags': [],
'tags': ['liquid drum and bass', 'drum and bass'],
'comment_count': int,
'repost_count': int,
'like_count': int,
'artists': list,
},
'params': {'skip_download': 'm3u8'},
}, {
@@ -67,7 +70,7 @@ class MixcloudIE(MixcloudBaseIE):
'upload_date': '20150203',
'uploader_url': 'https://www.mixcloud.com/gillespeterson/',
'duration': 2992,
'tags': [],
'tags': ['jazz', 'soul', 'world music', 'funk'],
'comment_count': int,
'repost_count': int,
'like_count': int,
@@ -149,8 +152,6 @@ class MixcloudIE(MixcloudBaseIE):
elif reason:
raise ExtractorError('Track is restricted', expected=True)
title = cloudcast['name']
stream_info = cloudcast['streamInfo']
formats = []
@@ -182,47 +183,39 @@ class MixcloudIE(MixcloudBaseIE):
self.raise_login_required(metadata_available=True)
comments = []
for edge in (try_get(cloudcast, lambda x: x['comments']['edges']) or []):
node = edge.get('node') or {}
for node in traverse_obj(cloudcast, ('comments', 'edges', ..., 'node', {dict})):
text = strip_or_none(node.get('comment'))
if not text:
continue
user = node.get('user') or {}
comments.append({
'author': user.get('displayName'),
'author_id': user.get('username'),
'text': text,
'timestamp': parse_iso8601(node.get('created')),
**traverse_obj(node, {
'author': ('user', 'displayName', {str}),
'author_id': ('user', 'username', {str}),
'timestamp': ('created', {parse_iso8601}),
}),
})
tags = []
for t in cloudcast.get('tags'):
tag = try_get(t, lambda x: x['tag']['name'], str)
if not tag:
tags.append(tag)
get_count = lambda x: int_or_none(try_get(cloudcast, lambda y: y[x]['totalCount']))
owner = cloudcast.get('owner') or {}
return {
'id': track_id,
'title': title,
'formats': formats,
'description': cloudcast.get('description'),
'thumbnail': try_get(cloudcast, lambda x: x['picture']['url'], str),
'uploader': owner.get('displayName'),
'timestamp': parse_iso8601(cloudcast.get('publishDate')),
'uploader_id': owner.get('username'),
'uploader_url': owner.get('url'),
'duration': int_or_none(cloudcast.get('audioLength')),
'view_count': int_or_none(cloudcast.get('plays')),
'like_count': get_count('favorites'),
'repost_count': get_count('reposts'),
'comment_count': get_count('comments'),
'comments': comments,
'tags': tags,
'artist': ', '.join(cloudcast.get('featuringArtistList') or []) or None,
**traverse_obj(cloudcast, {
'title': ('name', {str}),
'description': ('description', {str}),
'thumbnail': ('picture', 'url', {url_or_none}),
'timestamp': ('publishDate', {parse_iso8601}),
'duration': ('audioLength', {int_or_none}),
'uploader': ('owner', 'displayName', {str}),
'uploader_id': ('owner', 'username', {str}),
'uploader_url': ('owner', 'url', {url_or_none}),
'view_count': ('plays', {int_or_none}),
'like_count': ('favorites', 'totalCount', {int_or_none}),
'repost_count': ('reposts', 'totalCount', {int_or_none}),
'comment_count': ('comments', 'totalCount', {int_or_none}),
'tags': ('tags', ..., 'tag', 'name', {str}, filter, all, filter),
'artists': ('featuringArtistList', ..., {str}, filter, all, filter),
}),
}
@@ -295,7 +288,7 @@ class MixcloudUserIE(MixcloudPlaylistBaseIE):
'url': 'http://www.mixcloud.com/dholbach/',
'info_dict': {
'id': 'dholbach_uploads',
'title': 'Daniel Holbach (uploads)',
'title': 'dholbach (uploads)',
'description': 'md5:a3f468a60ac8c3e1f8616380fc469b2b',
},
'playlist_mincount': 36,
@@ -303,7 +296,7 @@ class MixcloudUserIE(MixcloudPlaylistBaseIE):
'url': 'http://www.mixcloud.com/dholbach/uploads/',
'info_dict': {
'id': 'dholbach_uploads',
'title': 'Daniel Holbach (uploads)',
'title': 'dholbach (uploads)',
'description': 'md5:a3f468a60ac8c3e1f8616380fc469b2b',
},
'playlist_mincount': 36,
@@ -311,7 +304,7 @@ class MixcloudUserIE(MixcloudPlaylistBaseIE):
'url': 'http://www.mixcloud.com/dholbach/favorites/',
'info_dict': {
'id': 'dholbach_favorites',
'title': 'Daniel Holbach (favorites)',
'title': 'dholbach (favorites)',
'description': 'md5:a3f468a60ac8c3e1f8616380fc469b2b',
},
# 'params': {
@@ -337,7 +330,7 @@ class MixcloudUserIE(MixcloudPlaylistBaseIE):
'title': 'First Ear (stream)',
'description': 'we maraud for ears',
},
'playlist_mincount': 269,
'playlist_mincount': 267,
}]
_TITLE_KEY = 'displayName'
@@ -361,7 +354,7 @@ class MixcloudPlaylistIE(MixcloudPlaylistBaseIE):
'id': 'maxvibes_jazzcat-on-ness-radio',
'title': 'Ness Radio sessions',
},
'playlist_mincount': 59,
'playlist_mincount': 58,
}]
_TITLE_KEY = 'name'
_DESCRIPTION_KEY = 'description'

View File

@@ -449,9 +449,7 @@ mutation initPlaybackSession(
if not (m3u8_url and token):
errors = '; '.join(traverse_obj(response, ('errors', ..., 'message', {str})))
if 'not entitled' in errors:
raise ExtractorError(errors, expected=True)
elif errors: # Only warn when 'blacked out' since radio formats are available
if errors: # Only warn when 'blacked out' or 'not entitled'; radio formats may be available
self.report_warning(f'API returned errors for {format_id}: {errors}')
else:
self.report_warning(f'No formats available for {format_id} broadcast; skipping')

View File

@@ -3,8 +3,8 @@ from .dailymotion import DailymotionIE
class MoviepilotIE(InfoExtractor):
_IE_NAME = 'moviepilot'
_IE_DESC = 'Moviepilot trailer'
IE_NAME = 'moviepilot'
IE_DESC = 'Moviepilot trailer'
_VALID_URL = r'https?://(?:www\.)?moviepilot\.de/movies/(?P<id>[^/]+)'
_TESTS = [{

View File

@@ -1,167 +1,215 @@
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
determine_ext,
int_or_none,
unescapeHTML,
parse_iso8601,
url_or_none,
)
from ..utils.traversal import traverse_obj
class MSNIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
_VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?P<locale>[a-z]{2}-[a-z]{2})/(?:[^/?#]+/)+(?P<display_id>[^/?#]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'https://www.msn.com/en-in/money/video/7-ways-to-get-rid-of-chest-congestion/vi-BBPxU6d',
'md5': '087548191d273c5c55d05028f8d2cbcd',
'url': 'https://www.msn.com/en-gb/video/news/president-macron-interrupts-trump-over-ukraine-funding/vi-AA1zMcD7',
'info_dict': {
'id': 'BBPxU6d',
'display_id': '7-ways-to-get-rid-of-chest-congestion',
'id': 'AA1zMcD7',
'ext': 'mp4',
'title': 'Seven ways to get rid of chest congestion',
'description': '7 Ways to Get Rid of Chest Congestion',
'duration': 88,
'uploader': 'Health',
'uploader_id': 'BBPrMqa',
'display_id': 'president-macron-interrupts-trump-over-ukraine-funding',
'title': 'President Macron interrupts Trump over Ukraine funding',
'description': 'md5:5fd3857ac25849e7a56cb25fbe1a2a8b',
'uploader': 'k! News UK',
'uploader_id': 'BB1hz5Rj',
'duration': 59,
'thumbnail': 'https://img-s-msn-com.akamaized.net/tenant/amp/entityid/AA1zMagX.img',
'tags': 'count:14',
'timestamp': 1740510914,
'upload_date': '20250225',
'release_timestamp': 1740513600,
'release_date': '20250225',
'modified_timestamp': 1741413241,
'modified_date': '20250308',
},
}, {
# Article, multiple Dailymotion Embeds
'url': 'https://www.msn.com/en-in/money/sports/hottest-football-wags-greatest-footballers-turned-managers-and-more/ar-BBpc7Nl',
'url': 'https://www.msn.com/en-gb/video/watch/films-success-saved-adam-pearsons-acting-career/vi-AA1znZGE?ocid=hpmsn',
'info_dict': {
'id': 'BBpc7Nl',
'id': 'AA1znZGE',
'ext': 'mp4',
'display_id': 'films-success-saved-adam-pearsons-acting-career',
'title': "Films' success saved Adam Pearson's acting career",
'description': 'md5:98c05f7bd9ab4f9c423400f62f2d3da5',
'uploader': 'Sky News',
'uploader_id': 'AA2eki',
'duration': 52,
'thumbnail': 'https://img-s-msn-com.akamaized.net/tenant/amp/entityid/AA1zo7nU.img',
'timestamp': 1739993965,
'upload_date': '20250219',
'release_timestamp': 1739977753,
'release_date': '20250219',
'modified_timestamp': 1742076259,
'modified_date': '20250315',
},
'playlist_mincount': 4,
}, {
'url': 'http://www.msn.com/en-ae/news/offbeat/meet-the-nine-year-old-self-made-millionaire/ar-BBt6ZKf',
'only_matching': True,
}, {
'url': 'http://www.msn.com/en-ae/video/watch/obama-a-lot-of-people-will-be-disappointed/vi-AAhxUMH',
'only_matching': True,
}, {
# geo restricted
'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/the-first-fart-makes-you-laugh-the-last-fart-makes-you-cry/vp-AAhzIBU',
'only_matching': True,
}, {
'url': 'http://www.msn.com/en-ae/entertainment/bollywood/watch-how-salman-khan-reacted-when-asked-if-he-would-apologize-for-his-raped-woman-comment/vi-AAhvzW6',
'only_matching': True,
}, {
# Vidible(AOL) Embed
'url': 'https://www.msn.com/en-us/money/other/jupiter-is-about-to-come-so-close-you-can-see-its-moons-with-binoculars/vi-AACqsHR',
'only_matching': True,
'url': 'https://www.msn.com/en-us/entertainment/news/rock-frontman-replacements-you-might-not-know-happened/vi-AA1yLVcD',
'info_dict': {
'id': 'AA1yLVcD',
'ext': 'mp4',
'display_id': 'rock-frontman-replacements-you-might-not-know-happened',
'title': 'Rock Frontman Replacements You Might Not Know Happened',
'description': 'md5:451a125496ff0c9f6816055bb1808da9',
'uploader': 'Grunge (Video)',
'uploader_id': 'BB1oveoV',
'duration': 596,
'thumbnail': 'https://img-s-msn-com.akamaized.net/tenant/amp/entityid/AA1yM4OJ.img',
'timestamp': 1739223456,
'upload_date': '20250210',
'release_timestamp': 1739219731,
'release_date': '20250210',
'modified_timestamp': 1741427272,
'modified_date': '20250308',
},
}, {
# Dailymotion Embed
'url': 'https://www.msn.com/es-ve/entretenimiento/watch/winston-salem-paire-refait-des-siennes-en-perdant-sa-raquette-au-service/vp-AAG704L',
'only_matching': True,
'url': 'https://www.msn.com/de-de/nachrichten/other/the-first-descendant-gameplay-trailer-zu-serena-der-neuen-gefl%C3%BCgelten-nachfahrin/vi-AA1B1d06',
'info_dict': {
'id': 'x9g6oli',
'ext': 'mp4',
'title': 'The First Descendant: Gameplay-Trailer zu Serena, der neuen geflügelten Nachfahrin',
'description': '',
'uploader': 'MeinMMO',
'uploader_id': 'x2mvqi4',
'view_count': int,
'like_count': int,
'age_limit': 0,
'duration': 60,
'thumbnail': 'https://s1.dmcdn.net/v/Y3fO61drj56vPB9SS/x1080',
'tags': ['MeinMMO', 'The First Descendant'],
'timestamp': 1742124877,
'upload_date': '20250316',
},
}, {
# YouTube Embed
'url': 'https://www.msn.com/en-in/money/news/meet-vikram-%E2%80%94-chandrayaan-2s-lander/vi-AAGUr0v',
'only_matching': True,
# Youtube Embed
'url': 'https://www.msn.com/en-gb/video/webcontent/web-content/vi-AA1ybFaJ',
'info_dict': {
'id': 'kQSChWu95nE',
'ext': 'mp4',
'title': '7 Daily Habits to Nurture Your Personal Growth',
'description': 'md5:6f233c68341b74dee30c8c121924e827',
'uploader': 'TopThink',
'uploader_id': '@TopThink',
'uploader_url': 'https://www.youtube.com/@TopThink',
'channel': 'TopThink',
'channel_id': 'UCMlGmHokrQRp-RaNO7aq4Uw',
'channel_url': 'https://www.youtube.com/channel/UCMlGmHokrQRp-RaNO7aq4Uw',
'channel_is_verified': True,
'channel_follower_count': int,
'comment_count': int,
'view_count': int,
'like_count': int,
'age_limit': 0,
'duration': 705,
'thumbnail': 'https://i.ytimg.com/vi/kQSChWu95nE/maxresdefault.jpg',
'categories': ['Howto & Style'],
'tags': ['topthink', 'top think', 'personal growth'],
'timestamp': 1722711620,
'upload_date': '20240803',
'playable_in_embed': True,
'availability': 'public',
'live_status': 'not_live',
},
}, {
# NBCSports Embed
'url': 'https://www.msn.com/en-us/money/football_nfl/week-13-preview-redskins-vs-panthers/vi-BBXsCDb',
'only_matching': True,
# Article with social embed
'url': 'https://www.msn.com/en-in/news/techandscience/watch-earth-sets-and-rises-behind-moon-in-breathtaking-blue-ghost-video/ar-AA1zKoAc',
'info_dict': {
'id': 'AA1zKoAc',
'title': 'Watch: Earth sets and rises behind Moon in breathtaking Blue Ghost video',
'description': 'md5:0ad51cfa77e42e7f0c46cf98a619dbbf',
'uploader': 'India Today',
'uploader_id': 'AAyFWG',
'tags': 'count:11',
'timestamp': 1740485034,
'upload_date': '20250225',
'release_timestamp': 1740484875,
'release_date': '20250225',
'modified_timestamp': 1740488561,
'modified_date': '20250225',
},
'playlist_count': 1,
}]
def _real_extract(self, url):
display_id, page_id = self._match_valid_url(url).groups()
locale, display_id, page_id = self._match_valid_url(url).group('locale', 'display_id', 'id')
webpage = self._download_webpage(url, display_id)
json_data = self._download_json(
f'https://assets.msn.com/content/view/v2/Detail/{locale}/{page_id}', page_id)
entries = []
for _, metadata in re.findall(r'data-metadata\s*=\s*(["\'])(?P<data>.+?)\1', webpage):
video = self._parse_json(unescapeHTML(metadata), display_id)
provider_id = video.get('providerId')
player_name = video.get('playerName')
if player_name and provider_id:
entry = None
if player_name == 'AOL':
if provider_id.startswith('http'):
provider_id = self._search_regex(
r'https?://delivery\.vidible\.tv/video/redirect/([0-9a-f]{24})',
provider_id, 'vidible id')
entry = self.url_result(
'aol-video:' + provider_id, 'Aol', provider_id)
elif player_name == 'Dailymotion':
entry = self.url_result(
'https://www.dailymotion.com/video/' + provider_id,
'Dailymotion', provider_id)
elif player_name == 'YouTube':
entry = self.url_result(
provider_id, 'Youtube', provider_id)
elif player_name == 'NBCSports':
entry = self.url_result(
'http://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/media/' + provider_id,
'NBCSportsVPlayer', provider_id)
if entry:
entries.append(entry)
continue
video_id = video['uuid']
title = video['title']
common_metadata = traverse_obj(json_data, {
'title': ('title', {str}),
'description': (('abstract', ('body', {clean_html})), {str}, filter, any),
'timestamp': ('createdDateTime', {parse_iso8601}),
'release_timestamp': ('publishedDateTime', {parse_iso8601}),
'modified_timestamp': ('updatedDateTime', {parse_iso8601}),
'thumbnail': ('thumbnail', 'image', 'url', {url_or_none}),
'duration': ('videoMetadata', 'playTime', {int_or_none}),
'tags': ('keywords', ..., {str}),
'uploader': ('provider', 'name', {str}),
'uploader_id': ('provider', 'id', {str}),
})
page_type = json_data['type']
source_url = traverse_obj(json_data, ('sourceHref', {url_or_none}))
if page_type == 'video':
if traverse_obj(json_data, ('thirdPartyVideoPlayer', 'enabled')) and source_url:
return self.url_result(source_url)
formats = []
for file_ in video.get('videoFiles', []):
format_url = file_.get('url')
if not format_url:
continue
if 'format=m3u8-aapl' in format_url:
# m3u8_native should not be used here until
# https://github.com/ytdl-org/youtube-dl/issues/9913 is fixed
formats.extend(self._extract_m3u8_formats(
format_url, display_id, 'mp4',
m3u8_id='hls', fatal=False))
elif 'format=mpd-time-csf' in format_url:
formats.extend(self._extract_mpd_formats(
format_url, display_id, 'dash', fatal=False))
elif '.ism' in format_url:
if format_url.endswith('.ism'):
format_url += '/manifest'
formats.extend(self._extract_ism_formats(
format_url, display_id, 'mss', fatal=False))
else:
format_id = file_.get('formatCode')
formats.append({
'url': format_url,
'ext': 'mp4',
'format_id': format_id,
'width': int_or_none(file_.get('width')),
'height': int_or_none(file_.get('height')),
'vbr': int_or_none(self._search_regex(r'_(\d+)\.mp4', format_url, 'vbr', default=None)),
'quality': 1 if format_id == '1001' else None,
})
subtitles = {}
for file_ in video.get('files', []):
format_url = file_.get('url')
format_code = file_.get('formatCode')
if not format_url or not format_code:
continue
if str(format_code) == '3100':
subtitles.setdefault(file_.get('culture', 'en'), []).append({
'ext': determine_ext(format_url, 'ttml'),
'url': format_url,
})
for file in traverse_obj(json_data, ('videoMetadata', 'externalVideoFiles', lambda _, v: url_or_none(v['url']))):
file_url = file['url']
ext = determine_ext(file_url)
if ext == 'm3u8':
fmts, subs = self._extract_m3u8_formats_and_subtitles(
file_url, page_id, 'mp4', m3u8_id='hls', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
elif ext == 'mpd':
fmts, subs = self._extract_mpd_formats_and_subtitles(
file_url, page_id, mpd_id='dash', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
else:
formats.append(
traverse_obj(file, {
'url': 'url',
'format_id': ('format', {str}),
'filesize': ('fileSize', {int_or_none}),
'height': ('height', {int_or_none}),
'width': ('width', {int_or_none}),
}))
for caption in traverse_obj(json_data, ('videoMetadata', 'closedCaptions', lambda _, v: url_or_none(v['href']))):
lang = caption.get('locale') or 'en-us'
subtitles.setdefault(lang, []).append({
'url': caption['href'],
'ext': 'ttml',
})
entries.append({
'id': video_id,
return {
'id': page_id,
'display_id': display_id,
'title': title,
'description': video.get('description'),
'thumbnail': video.get('headlineImage', {}).get('url'),
'duration': int_or_none(video.get('durationSecs')),
'uploader': video.get('sourceFriendly'),
'uploader_id': video.get('providerId'),
'creator': video.get('creator'),
'subtitles': subtitles,
'formats': formats,
})
'subtitles': subtitles,
**common_metadata,
}
elif page_type == 'webcontent':
if not source_url:
raise ExtractorError('Could not find source URL')
return self.url_result(source_url)
elif page_type == 'article':
entries = []
for embed_url in traverse_obj(json_data, ('socialEmbeds', ..., 'postUrl', {url_or_none})):
entries.append(self.url_result(embed_url))
if not entries:
error = unescapeHTML(self._search_regex(
r'data-error=(["\'])(?P<error>.+?)\1',
webpage, 'error', group='error'))
raise ExtractorError(f'{self.IE_NAME} said: {error}', expected=True)
return self.playlist_result(entries, page_id, **common_metadata)
return self.playlist_result(entries, page_id)
raise ExtractorError(f'Unsupported page type: {page_type}')

View File

@@ -4,7 +4,9 @@ from .common import InfoExtractor
from ..utils import (
extract_attributes,
unified_timestamp,
url_or_none,
)
from ..utils.traversal import traverse_obj
class N1InfoAssetIE(InfoExtractor):
@@ -35,9 +37,9 @@ class N1InfoIIE(InfoExtractor):
IE_NAME = 'N1Info:article'
_VALID_URL = r'https?://(?:(?:\w+\.)?n1info\.\w+|nova\.rs)/(?:[^/?#]+/){1,2}(?P<id>[^/?#]+)'
_TESTS = [{
# Youtube embedded
# YouTube embedded
'url': 'https://rs.n1info.com/sport-klub/tenis/kako-je-djokovic-propustio-istorijsku-priliku-video/',
'md5': '01ddb6646d0fd9c4c7d990aa77fe1c5a',
'md5': '987ce6fd72acfecc453281e066b87973',
'info_dict': {
'id': 'L5Hd4hQVUpk',
'ext': 'mp4',
@@ -45,7 +47,26 @@ class N1InfoIIE(InfoExtractor):
'title': 'Ozmo i USO21, ep. 13: Novak Đoković Danil Medvedev | Ključevi Poraza, Budućnost | SPORT KLUB TENIS',
'description': 'md5:467f330af1effedd2e290f10dc31bb8e',
'uploader': 'Sport Klub',
'uploader_id': 'sportklub',
'uploader_id': '@sportklub',
'uploader_url': 'https://www.youtube.com/@sportklub',
'channel': 'Sport Klub',
'channel_id': 'UChpzBje9Ro6CComXe3BgNaw',
'channel_url': 'https://www.youtube.com/channel/UChpzBje9Ro6CComXe3BgNaw',
'channel_is_verified': True,
'channel_follower_count': int,
'comment_count': int,
'view_count': int,
'like_count': int,
'age_limit': 0,
'duration': 1049,
'thumbnail': 'https://i.ytimg.com/vi/L5Hd4hQVUpk/maxresdefault.jpg',
'chapters': 'count:9',
'categories': ['Sports'],
'tags': 'count:10',
'timestamp': 1631522787,
'playable_in_embed': True,
'availability': 'public',
'live_status': 'not_live',
},
}, {
'url': 'https://rs.n1info.com/vesti/djilas-los-plan-za-metro-nece-resiti-nijedan-saobracajni-problem/',
@@ -55,6 +76,7 @@ class N1InfoIIE(InfoExtractor):
'title': 'Đilas: Predlog izgradnje metroa besmislen; SNS odbacuje navode',
'upload_date': '20210924',
'timestamp': 1632481347,
'thumbnail': 'http://n1info.rs/wp-content/themes/ucnewsportal-n1/dist/assets/images/placeholder-image-video.jpg',
},
'params': {
'skip_download': True,
@@ -67,6 +89,7 @@ class N1InfoIIE(InfoExtractor):
'title': 'Zadnji dnevi na kopališču Ilirija: “Ilirija ni umrla, ubili so jo”',
'timestamp': 1632567630,
'upload_date': '20210925',
'thumbnail': 'https://n1info.si/wp-content/uploads/2021/09/06/1630945843-tomaz3.png',
},
'params': {
'skip_download': True,
@@ -81,6 +104,14 @@ class N1InfoIIE(InfoExtractor):
'upload_date': '20210924',
'timestamp': 1632448649.0,
'uploader': 'YouLotWhatDontStop',
'display_id': 'pu9wbx',
'channel_id': 'serbia',
'comment_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 0,
'duration': 134,
'thumbnail': 'https://external-preview.redd.it/5nmmawSeGx60miQM3Iq-ueC9oyCLTLjjqX-qqY8uRsc.png?format=pjpg&auto=webp&s=2f973400b04d23f871b608b178e47fc01f9b8f1d',
},
'params': {
'skip_download': True,
@@ -93,6 +124,7 @@ class N1InfoIIE(InfoExtractor):
'title': 'Žaklina Tatalović Ani Brnabić: Pričate laži (VIDEO)',
'upload_date': '20211102',
'timestamp': 1635861677,
'thumbnail': 'https://nova.rs/wp-content/uploads/2021/11/02/1635860298-TNJG_Ana_Brnabic_i_Zaklina_Tatalovic_100_dana_Vlade_GP.jpg',
},
}, {
'url': 'https://n1info.rs/vesti/cuta-biti-u-kosovskoj-mitrovici-znaci-da-te-docekaju-eksplozivnim-napravama/',
@@ -104,6 +136,16 @@ class N1InfoIIE(InfoExtractor):
'timestamp': 1687290536,
'thumbnail': 'https://cdn.brid.tv/live/partners/26827/snapshot/1332368_th_6492013a8356f_1687290170.jpg',
},
}, {
'url': 'https://n1info.rs/vesti/vuciceva-turneja-po-srbiji-najavljuje-kontrarevoluciju-preti-svom-narodu-vredja-novinare/',
'info_dict': {
'id': '2025974',
'ext': 'mp4',
'title': 'Vučićeva turneja po Srbiji: Najavljuje kontrarevoluciju, preti svom narodu, vređa novinare',
'thumbnail': 'https://cdn-uc.brid.tv/live/partners/26827/snapshot/2025974_fhd_67c4a23280a81_1740939826.jpg',
'timestamp': 1740939936,
'upload_date': '20250302',
},
}, {
'url': 'https://hr.n1info.com/vijesti/pravobraniteljica-o-ubojstvu-u-zagrebu-radi-se-o-doista-nezapamcenoj-situaciji/',
'only_matching': True,
@@ -115,11 +157,11 @@ class N1InfoIIE(InfoExtractor):
title = self._html_search_regex(r'<h1[^>]+>(.+?)</h1>', webpage, 'title')
timestamp = unified_timestamp(self._html_search_meta('article:published_time', webpage))
plugin_data = self._html_search_meta('BridPlugin', webpage)
plugin_data = re.findall(r'\$bp\("(?:Brid|TargetVideo)_\d+",\s(.+)\);', webpage)
entries = []
if plugin_data:
site_id = self._html_search_regex(r'site:(\d+)', webpage, 'site id')
for video_data in re.findall(r'\$bp\("Brid_\d+", (.+)\);', webpage):
for video_data in plugin_data:
video_id = self._parse_json(video_data, title)['video']
entries.append({
'id': video_id,
@@ -140,7 +182,7 @@ class N1InfoIIE(InfoExtractor):
'url': video_data.get('data-url'),
'id': video_data.get('id'),
'title': title,
'thumbnail': video_data.get('data-thumbnail'),
'thumbnail': traverse_obj(video_data, (('data-thumbnail', 'data-default_thumbnail'), {url_or_none}, any)),
'timestamp': timestamp,
'ie_key': 'N1InfoAsset',
})
@@ -152,7 +194,7 @@ class N1InfoIIE(InfoExtractor):
if url.startswith('https://www.youtube.com'):
entries.append(self.url_result(url, ie='Youtube'))
elif url.startswith('https://www.redditmedia.com'):
entries.append(self.url_result(url, ie='RedditR'))
entries.append(self.url_result(url, ie='Reddit'))
return {
'_type': 'playlist',

View File

@@ -736,7 +736,7 @@ class NBCStationsIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
nbc_data = self._search_json(
r'<script>\s*var\s+nbc\s*=', webpage, 'NBC JSON data', video_id)
r'(?:<script>\s*var\s+nbc\s*=|Object\.assign\(nbc,)', webpage, 'NBC JSON data', video_id)
pdk_acct = nbc_data.get('pdkAcct') or 'Yh1nAC'
fw_ssid = traverse_obj(nbc_data, ('video', 'fwSSID'))

View File

@@ -27,6 +27,7 @@ from ..utils import (
traverse_obj,
try_get,
unescapeHTML,
unified_timestamp,
update_url_query,
url_basename,
url_or_none,
@@ -985,6 +986,7 @@ class NiconicoLiveIE(InfoExtractor):
'quality': 'abr',
'protocol': 'hls+fmp4',
'latency': latency,
'accessRightMethod': 'single_cookie',
'chasePlay': False,
},
'room': {
@@ -1005,6 +1007,7 @@ class NiconicoLiveIE(InfoExtractor):
if data.get('type') == 'stream':
m3u8_url = data['data']['uri']
qualities = data['data']['availableQualities']
cookies = data['data']['cookies']
break
elif data.get('type') == 'disconnect':
self.write_debug(recv)
@@ -1043,6 +1046,11 @@ class NiconicoLiveIE(InfoExtractor):
**res,
})
for cookie in cookies:
self._set_cookie(
cookie['domain'], cookie['name'], cookie['value'],
expire_time=unified_timestamp(cookie['expires']), path=cookie['path'], secure=cookie['secure'])
formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True)
for fmt, q in zip(formats, reversed(qualities[1:])):
fmt.update({

View File

@@ -1,34 +1,46 @@
import json
import re
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
smuggle_url,
parse_iso8601,
parse_resolution,
str_or_none,
try_get,
unified_strdate,
unified_timestamp,
url_or_none,
)
from ..utils.traversal import require, traverse_obj, value
class NineNowIE(InfoExtractor):
IE_NAME = '9now.com.au'
_VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)'
_GEO_COUNTRIES = ['AU']
_VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/?#]+/){2}(?P<id>(?P<type>clip|episode)-[^/?#]+)'
_GEO_BYPASS = False
_TESTS = [{
# clip
'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc',
'md5': '17cf47d63ec9323e562c9957a968b565',
'url': 'https://www.9now.com.au/today/season-2025/clip-cm8hw9h5z00080hquqa5hszq7',
'info_dict': {
'id': '16801',
'id': '6370295582112',
'ext': 'mp4',
'title': 'St. Kilda\'s Joey Montagna on the potential for a player\'s strike',
'description': 'Is a boycott of the NAB Cup "on the table"?',
'title': 'Would Karl Stefanovic be able to land a plane?',
'description': 'The Today host\'s skills are put to the test with the latest simulation tech.',
'uploader_id': '4460760524001',
'upload_date': '20160713',
'timestamp': 1468421266,
'duration': 197.376,
'tags': ['flights', 'technology', 'Karl Stefanovic'],
'season': 'Season 2025',
'season_number': 2025,
'series': 'TODAY',
'timestamp': 1742507988,
'upload_date': '20250320',
'release_timestamp': 1742507983,
'release_date': '20250320',
'thumbnail': r're:https?://.+/1920x0/.+\.jpg',
},
'params': {
'skip_download': 'HLS/DASH fragments and mp4 URLs are geo-restricted; only available in AU',
},
'skip': 'Only available in Australia',
}, {
# episode
'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19',
@@ -41,7 +53,7 @@ class NineNowIE(InfoExtractor):
# episode of series
'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3',
'info_dict': {
'id': '6249614030001',
'id': '6308830406112',
'title': 'Episode 3',
'ext': 'mp4',
'season_number': 3,
@@ -50,72 +62,87 @@ class NineNowIE(InfoExtractor):
'uploader_id': '4460760524001',
'timestamp': 1619002200,
'upload_date': '20210421',
'duration': 3574.085,
'thumbnail': r're:https?://.+/1920x0/.+\.jpg',
'tags': ['episode'],
'series': 'Lego Masters',
'season': 'Season 3',
'episode': 'Episode 3',
'release_timestamp': 1619002200,
'release_date': '20210421',
},
'expected_warnings': ['Ignoring subtitle tracks'],
'params': {
'skip_download': True,
'skip_download': 'HLS/DASH fragments and mp4 URLs are geo-restricted; only available in AU',
},
}, {
'url': 'https://www.9now.com.au/married-at-first-sight/season-12/episode-1',
'info_dict': {
'id': '6367798770112',
'ext': 'mp4',
'title': 'Episode 1',
'description': r're:The cultural sensation of Married At First Sight returns with our first weddings! .{90}$',
'uploader_id': '4460760524001',
'duration': 5415.079,
'thumbnail': r're:https?://.+/1920x0/.+\.png',
'tags': ['episode'],
'season': 'Season 12',
'season_number': 12,
'episode': 'Episode 1',
'episode_number': 1,
'series': 'Married at First Sight',
'timestamp': 1737973800,
'upload_date': '20250127',
'release_timestamp': 1737973800,
'release_date': '20250127',
},
'params': {
'skip_download': 'HLS/DASH fragments and mp4 URLs are geo-restricted; only available in AU',
},
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s'
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId={}'
# XXX: For parsing next.js v15+ data; see also yt_dlp.extractor.francetv and yt_dlp.extractor.goplay
def _find_json(self, s):
return self._search_json(
r'\w+\s*:\s*', s, 'next js data', None, contains_pattern=r'\[(?s:.+)\]', default=None)
def _real_extract(self, url):
display_id = self._match_id(url)
display_id, video_type = self._match_valid_url(url).group('id', 'type')
webpage = self._download_webpage(url, display_id)
page_data = self._parse_json(self._search_regex(
r'window\.__data\s*=\s*({.*?});', webpage,
'page data', default='{}'), display_id, fatal=False)
if not page_data:
page_data = self._parse_json(self._parse_json(self._search_regex(
r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;',
webpage, 'page data'), display_id), display_id)
for kind in ('episode', 'clip'):
current_key = page_data.get(kind, {}).get(
f'current{kind.capitalize()}Key')
if not current_key:
continue
cache = page_data.get(kind, {}).get(f'{kind}Cache', {})
if not cache:
continue
common_data = {
'episode': (cache.get(current_key) or next(iter(cache.values())))[kind],
'season': (cache.get(current_key) or next(iter(cache.values()))).get('season', None),
}
break
else:
raise ExtractorError('Unable to find video data')
common_data = traverse_obj(
re.findall(r'<script[^>]*>\s*self\.__next_f\.push\(\s*(\[.+?\])\s*\);?\s*</script>', webpage),
(..., {json.loads}, ..., {self._find_json},
lambda _, v: v['payload'][video_type]['slug'] == display_id,
'payload', any, {require('video data')}))
if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool):
if traverse_obj(common_data, (video_type, 'video', 'drm', {bool})):
self.report_drm(display_id)
brightcove_id = try_get(
common_data, lambda x: x['episode']['video']['brightcoveId'], str) or 'ref:{}'.format(common_data['episode']['video']['referenceId'])
video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id
title = try_get(common_data, lambda x: x['episode']['name'], str)
season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int)
episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int)
timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], str))
release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], str))
thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {}
thumbnails = [{
'id': thumbnail_id,
'url': thumbnail_url,
'width': int_or_none(thumbnail_id[1:]),
} for thumbnail_id, thumbnail_url in thumbnails_data.items()]
brightcove_id = traverse_obj(common_data, (
video_type, 'video', (
('brightcoveId', {str}),
('referenceId', {str}, {lambda x: f'ref:{x}' if x else None}),
), any, {require('brightcove ID')}))
return {
'_type': 'url_transparent',
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': self._GEO_COUNTRIES}),
'id': video_id,
'title': title,
'description': try_get(common_data, lambda x: x['episode']['description'], str),
'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000),
'thumbnails': thumbnails,
'ie_key': 'BrightcoveNew',
'season_number': season_number,
'episode_number': episode_number,
'timestamp': timestamp,
'release_date': release_date,
'ie_key': BrightcoveNewIE.ie_key(),
'url': self.BRIGHTCOVE_URL_TEMPLATE.format(brightcove_id),
**traverse_obj(common_data, {
'id': (video_type, 'video', 'id', {int}, ({str_or_none}, {value(brightcove_id)}), any),
'title': (video_type, 'name', {str}),
'description': (video_type, 'description', {str}),
'duration': (video_type, 'video', 'duration', {float_or_none(scale=1000)}),
'tags': (video_type, 'tags', ..., 'name', {str}, all, filter),
'series': ('tvSeries', 'name', {str}),
'season_number': ('season', 'seasonNumber', {int_or_none}),
'episode_number': ('episode', 'episodeNumber', {int_or_none}),
'timestamp': ('episode', 'airDate', {parse_iso8601}),
'release_timestamp': (video_type, 'availability', {parse_iso8601}),
'thumbnails': (video_type, 'image', 'sizes', {dict.items}, lambda _, v: url_or_none(v[1]), {
'id': 0,
'url': 1,
'width': (1, {parse_resolution}, 'width'),
}),
}),
}

View File

@@ -11,12 +11,15 @@ class On24IE(InfoExtractor):
IE_NAME = 'on24'
IE_DESC = 'ON24'
_VALID_URL = r'''(?x)
https?://event\.on24\.com/(?:
wcc/r/(?P<id_1>\d{7})/(?P<key_1>[0-9A-F]{32})|
eventRegistration/(?:console/EventConsoleApollo|EventLobbyServlet\?target=lobby30)
\.jsp\?(?:[^/#?]*&)?eventid=(?P<id_2>\d{7})[^/#?]*&key=(?P<key_2>[0-9A-F]{32})
)'''
_ID_RE = r'(?P<id>\d{7})'
_KEY_RE = r'(?P<key>[0-9A-F]{32})'
_URL_BASE_RE = r'https?://event\.on24\.com'
_URL_QUERY_RE = rf'(?:[^#]*&)?eventid={_ID_RE}&(?:[^#]+&)?key={_KEY_RE}'
_VALID_URL = [
rf'{_URL_BASE_RE}/wcc/r/{_ID_RE}/{_KEY_RE}',
rf'{_URL_BASE_RE}/eventRegistration/console/(?:EventConsoleApollo\.jsp|apollox/mainEvent/?)\?{_URL_QUERY_RE}',
rf'{_URL_BASE_RE}/eventRegistration/EventLobbyServlet/?\?{_URL_QUERY_RE}',
]
_TESTS = [{
'url': 'https://event.on24.com/eventRegistration/console/EventConsoleApollo.jsp?uimode=nextgeneration&eventid=2197467&sessionid=1&key=5DF57BE53237F36A43B478DD36277A84&contenttype=A&eventuserid=305999&playerwidth=1000&playerheight=650&caller=previewLobby&text_language_id=en&format=fhaudio&newConsole=false',
@@ -34,12 +37,16 @@ class On24IE(InfoExtractor):
}, {
'url': 'https://event.on24.com/eventRegistration/console/EventConsoleApollo.jsp?&eventid=2639291&sessionid=1&username=&partnerref=&format=fhvideo1&mobile=&flashsupportedmobiledevice=&helpcenter=&key=82829018E813065A122363877975752E&newConsole=true&nxChe=true&newTabCon=true&text_language_id=en&playerwidth=748&playerheight=526&eventuserid=338788762&contenttype=A&mediametricsessionid=384764716&mediametricid=3558192&usercd=369267058&mode=launch',
'only_matching': True,
}, {
'url': 'https://event.on24.com/eventRegistration/EventLobbyServlet?target=reg20.jsp&eventid=3543176&key=BC0F6B968B67C34B50D461D40FDB3E18&groupId=3143628',
'only_matching': True,
}, {
'url': 'https://event.on24.com/eventRegistration/console/apollox/mainEvent?&eventid=4843671&sessionid=1&username=&partnerref=&format=fhvideo1&mobile=&flashsupportedmobiledevice=&helpcenter=&key=4EAC9B5C564CC98FF29E619B06A2F743&newConsole=true&nxChe=true&newTabCon=true&consoleEarEventConsole=false&consoleEarCloudApi=false&text_language_id=en&playerwidth=748&playerheight=526&referrer=https%3A%2F%2Fevent.on24.com%2Finterface%2Fregistration%2Fautoreg%2Findex.html%3Fsessionid%3D1%26eventid%3D4843671%26key%3D4EAC9B5C564CC98FF29E619B06A2F743%26email%3D000a3e42-7952-4dd6-8f8a-34c38ea3cf02%2540platform%26firstname%3Ds%26lastname%3Ds%26deletecookie%3Dtrue%26event_email%3DN%26marketing_email%3DN%26std1%3D0642572014177%26std2%3D0642572014179%26std3%3D550165f7-a44e-4725-9fe6-716f89908c2b%26std4%3D0&eventuserid=745776448&contenttype=A&mediametricsessionid=640613707&mediametricid=6810717&usercd=745776448&mode=launch',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
event_id = mobj.group('id_1') or mobj.group('id_2')
event_key = mobj.group('key_1') or mobj.group('key_2')
event_id, event_key = self._match_valid_url(url).group('id', 'key')
event_data = self._download_json(
'https://event.on24.com/apic/utilApp/EventConsoleCachedServlet',

View File

@@ -67,7 +67,7 @@ class OpenRecBaseIE(InfoExtractor):
class OpenRecIE(OpenRecBaseIE):
IE_NAME = 'openrec'
_VALID_URL = r'https?://(?:www\.)?openrec\.tv/live/(?P<id>[^/]+)'
_VALID_URL = r'https?://(?:www\.)?openrec\.tv/live/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.openrec.tv/live/2p8v31qe4zy',
'only_matching': True,
@@ -85,7 +85,7 @@ class OpenRecIE(OpenRecBaseIE):
class OpenRecCaptureIE(OpenRecBaseIE):
IE_NAME = 'openrec:capture'
_VALID_URL = r'https?://(?:www\.)?openrec\.tv/capture/(?P<id>[^/]+)'
_VALID_URL = r'https?://(?:www\.)?openrec\.tv/capture/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.openrec.tv/capture/l9nk2x4gn14',
'only_matching': True,
@@ -129,7 +129,7 @@ class OpenRecCaptureIE(OpenRecBaseIE):
class OpenRecMovieIE(OpenRecBaseIE):
IE_NAME = 'openrec:movie'
_VALID_URL = r'https?://(?:www\.)?openrec\.tv/movie/(?P<id>[^/]+)'
_VALID_URL = r'https?://(?:www\.)?openrec\.tv/movie/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.openrec.tv/movie/nqz5xl5km8v',
'info_dict': {
@@ -141,6 +141,9 @@ class OpenRecMovieIE(OpenRecBaseIE):
'uploader_id': 'taiki_to_kazuhiro',
'timestamp': 1638856800,
},
}, {
'url': 'https://www.openrec.tv/movie/2p8vvex548y?playlist_id=98brq96vvsgn2nd',
'only_matching': True,
}]
def _real_extract(self, url):

101
yt_dlp/extractor/parti.py Normal file
View File

@@ -0,0 +1,101 @@
from .common import InfoExtractor
from ..utils import UserNotLive, int_or_none, parse_iso8601, url_or_none, urljoin
from ..utils.traversal import traverse_obj
class PartiBaseIE(InfoExtractor):
    # Shared helper for Parti extractors: every endpoint used by the
    # subclasses lives under the parti.com "profile" API root.
    def _call_api(self, path, video_id, note=None):
        endpoint = f'https://api-backend.parti.com/parti_v2/profile/{path}'
        return self._download_json(endpoint, video_id, note)
class PartiVideoIE(PartiBaseIE):
    IE_NAME = 'parti:video'
    _VALID_URL = r'https?://(?:www\.)?parti\.com/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://parti.com/video/66284',
        'info_dict': {
            'id': '66284',
            'ext': 'mp4',
            'title': 'NOW LIVE ',
            'upload_date': '20250327',
            'categories': ['Gaming'],
            'thumbnail': 'https://assets.parti.com/351424_eb9e5250-2821-484a-9c5f-ca99aa666c87.png',
            'channel': 'ItZTMGG',
            'timestamp': 1743044379,
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The "recent" endpoint returns the recording metadata for a finished stream
        info = self._call_api(f'get_livestream_channel_info/recent/{video_id}', video_id)
        # livestream_recording is a path relative to the playback host
        playlist_url = urljoin('https://watch.parti.com', info['livestream_recording'])
        metadata = traverse_obj(info, {
            'title': ('event_title', {str}),
            'channel': ('user_name', {str}),
            'thumbnail': ('event_file', {url_or_none}),
            'categories': ('category_name', {str}, filter, all),
            'timestamp': ('event_start_ts', {int_or_none}),
        })
        return {
            'id': video_id,
            'formats': self._extract_m3u8_formats(playlist_url, video_id, 'mp4'),
            **metadata,
        }
class PartiLivestreamIE(PartiBaseIE):
    """Extractor for currently-running Parti livestreams.

    Streams are addressed by the social-media service and the creator slug;
    the slug is resolved to a numeric creator ID via the profile API first.
    """
    IE_NAME = 'parti:livestream'
    _VALID_URL = r'https?://(?:www\.)?parti\.com/creator/(?P<service>[\w]+)/(?P<id>[\w/-]+)'
    _TESTS = [{
        'url': 'https://parti.com/creator/parti/Capt_Robs_Adventures',
        'info_dict': {
            'id': 'Capt_Robs_Adventures',
            'ext': 'mp4',
            'title': r"re:I'm Live on Parti \d{4}-\d{2}-\d{2} \d{2}:\d{2}",
            'view_count': int,
            'thumbnail': r're:https://assets\.parti\.com/.+\.png',
            'timestamp': 1743879776,
            'upload_date': '20250405',
            'live_status': 'is_live',
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://parti.com/creator/discord/sazboxgaming/0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        service, slug = self._match_valid_url(url).group('service', 'id')
        # Slashes inside the slug must be sent as '%23' for this endpoint
        user_path = f'get_user_by_social_media/{service}/{slug.replace("/", "%23")}'
        creator_id = self._call_api(user_path, slug, note='Fetching user ID')
        data = self._call_api(
            f'get_livestream_channel_info/{creator_id}', creator_id,
            note='Fetching user profile feed')['channel_info']
        if not traverse_obj(data, ('channel', 'is_live', {bool})):
            raise UserNotLive(video_id=creator_id)
        channel_info = data['channel']
        formats = self._extract_m3u8_formats(
            channel_info['playback_url'], slug, live=True, query={
                'token': channel_info['playback_auth_token'],
                'player_version': '1.17.0',
            })
        metadata = traverse_obj(data, {
            'title': ('livestream_event_info', 'event_name', {str}),
            'description': ('livestream_event_info', 'event_description', {str}),
            'thumbnail': ('livestream_event_info', 'livestream_preview_file', {url_or_none}),
            'timestamp': ('stream', 'start_time', {parse_iso8601}),
            'view_count': ('stream', 'viewer_count', {int_or_none}),
        })
        return {
            'id': slug,
            'formats': formats,
            'is_live': True,
            **metadata,
        }

View File

@@ -23,9 +23,9 @@ class PinterestBaseIE(InfoExtractor):
def _call_api(self, resource, video_id, options):
return self._download_json(
f'https://www.pinterest.com/resource/{resource}Resource/get/',
video_id, f'Download {resource} JSON metadata', query={
'data': json.dumps({'options': options}),
})['resource_response']
video_id, f'Download {resource} JSON metadata',
query={'data': json.dumps({'options': options})},
headers={'X-Pinterest-PWS-Handler': 'www/[username].js'})['resource_response']
def _extract_video(self, data, extract_formats=True):
video_id = data['id']

View File

@@ -22,7 +22,7 @@ from ..utils import (
)
class PolskieRadioBaseExtractor(InfoExtractor):
class PolskieRadioBaseIE(InfoExtractor):
def _extract_webpage_player_entries(self, webpage, playlist_id, base_data):
media_urls = set()
@@ -47,7 +47,7 @@ class PolskieRadioBaseExtractor(InfoExtractor):
yield entry
class PolskieRadioLegacyIE(PolskieRadioBaseExtractor):
class PolskieRadioLegacyIE(PolskieRadioBaseIE):
# legacy sites
IE_NAME = 'polskieradio:legacy'
_VALID_URL = r'https?://(?:www\.)?polskieradio(?:24)?\.pl/\d+/\d+/[Aa]rtykul/(?P<id>\d+)'
@@ -127,7 +127,7 @@ class PolskieRadioLegacyIE(PolskieRadioBaseExtractor):
return self.playlist_result(entries, playlist_id, title, description)
class PolskieRadioIE(PolskieRadioBaseExtractor):
class PolskieRadioIE(PolskieRadioBaseIE):
# new next.js sites
_VALID_URL = r'https?://(?:[^/]+\.)?(?:polskieradio(?:24)?|radiokierowcow)\.pl/artykul/(?P<id>\d+)'
_TESTS = [{
@@ -519,7 +519,7 @@ class PolskieRadioPlayerIE(InfoExtractor):
}
class PolskieRadioPodcastBaseExtractor(InfoExtractor):
class PolskieRadioPodcastBaseIE(InfoExtractor):
_API_BASE = 'https://apipodcasts.polskieradio.pl/api'
def _parse_episode(self, data):
@@ -539,7 +539,7 @@ class PolskieRadioPodcastBaseExtractor(InfoExtractor):
}
class PolskieRadioPodcastListIE(PolskieRadioPodcastBaseExtractor):
class PolskieRadioPodcastListIE(PolskieRadioPodcastBaseIE):
IE_NAME = 'polskieradio:podcast:list'
_VALID_URL = r'https?://podcasty\.polskieradio\.pl/podcast/(?P<id>\d+)'
_TESTS = [{
@@ -578,7 +578,7 @@ class PolskieRadioPodcastListIE(PolskieRadioPodcastBaseExtractor):
}
class PolskieRadioPodcastIE(PolskieRadioPodcastBaseExtractor):
class PolskieRadioPodcastIE(PolskieRadioPodcastBaseIE):
IE_NAME = 'polskieradio:podcast'
_VALID_URL = r'https?://podcasty\.polskieradio\.pl/track/(?P<id>[a-f\d]{8}(?:-[a-f\d]{4}){4}[a-f\d]{8})'
_TESTS = [{

View File

@@ -8,6 +8,7 @@ from ..utils import (
int_or_none,
parse_qs,
traverse_obj,
truncate_string,
try_get,
unescapeHTML,
update_url_query,
@@ -26,6 +27,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': '6rrwyj',
'title': 'That small heart attack.',
'alt_title': 'That small heart attack.',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:4',
'timestamp': 1501941939,
@@ -49,7 +51,8 @@ class RedditIE(InfoExtractor):
'id': 'gyh95hiqc0b11',
'ext': 'mp4',
'display_id': '90bu6w',
'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
'title': 'Heat index was 110 degrees so we offered him a cold drink. He went fo...',
'alt_title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:7',
'timestamp': 1532051078,
@@ -69,7 +72,8 @@ class RedditIE(InfoExtractor):
'id': 'zasobba6wp071',
'ext': 'mp4',
'display_id': 'nip71r',
'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
'title': 'I plan to make more stickers and prints! Check them out on my Etsy! O...',
'alt_title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:5',
'timestamp': 1621709093,
@@ -91,7 +95,17 @@ class RedditIE(InfoExtractor):
'playlist_count': 2,
'info_dict': {
'id': 'wzqkxp',
'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
'title': '[Finale] Kamen Rider Revice Episode 50 "Family to the End, Until the ...',
'alt_title': '[Finale] Kamen Rider Revice Episode 50 "Family to the End, Until the Day We Meet Again" Discussion',
'description': 'md5:5b7deb328062b164b15704c5fd67c335',
'uploader': 'TheTwelveYearOld',
'channel_id': 'KamenRider',
'comment_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 0,
'timestamp': 1661676059.0,
'upload_date': '20220828',
},
}, {
# crossposted reddit-hosted media
@@ -102,6 +116,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': 'zjjw82',
'title': 'Cringe',
'alt_title': 'Cringe',
'uploader': 'Otaku-senpai69420',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'upload_date': '20221212',
@@ -122,6 +137,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': '124pp33',
'title': 'Harmless prank of some old friends',
'alt_title': 'Harmless prank of some old friends',
'uploader': 'Dudezila',
'channel_id': 'ContagiousLaughter',
'duration': 17,
@@ -142,6 +158,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': '12fujy3',
'title': 'Based Hasan?',
'alt_title': 'Based Hasan?',
'uploader': 'KingNigelXLII',
'channel_id': 'GenZedong',
'duration': 16,
@@ -161,6 +178,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': '1cl9h0u',
'title': 'The insurance claim will be interesting',
'alt_title': 'The insurance claim will be interesting',
'uploader': 'darrenpauli',
'channel_id': 'Unexpected',
'duration': 53,
@@ -183,6 +201,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': '1cxwzso',
'title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
'alt_title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
'uploader': 'Woodstovia',
'channel_id': 'soccer',
'duration': 30,
@@ -206,6 +225,7 @@ class RedditIE(InfoExtractor):
'ext': 'mp4',
'display_id': 'degtjo',
'title': 'When the K hits',
'alt_title': 'When the K hits',
'uploader': '[deleted]',
'channel_id': 'ketamine',
'comment_count': int,
@@ -304,14 +324,6 @@ class RedditIE(InfoExtractor):
data = data[0]['data']['children'][0]['data']
video_url = data['url']
over_18 = data.get('over_18')
if over_18 is True:
age_limit = 18
elif over_18 is False:
age_limit = 0
else:
age_limit = None
thumbnails = []
def add_thumbnail(src):
@@ -337,15 +349,19 @@ class RedditIE(InfoExtractor):
add_thumbnail(resolution)
info = {
'title': data.get('title'),
'thumbnails': thumbnails,
'timestamp': float_or_none(data.get('created_utc')),
'uploader': data.get('author'),
'channel_id': data.get('subreddit'),
'like_count': int_or_none(data.get('ups')),
'dislike_count': int_or_none(data.get('downs')),
'comment_count': int_or_none(data.get('num_comments')),
'age_limit': age_limit,
'age_limit': {True: 18, False: 0}.get(data.get('over_18')),
**traverse_obj(data, {
'title': ('title', {truncate_string(left=72)}),
'alt_title': ('title', {str}),
'description': ('selftext', {str}, filter),
'timestamp': ('created_utc', {float_or_none}),
'uploader': ('author', {str}),
'channel_id': ('subreddit', {str}),
'like_count': ('ups', {int_or_none}),
'dislike_count': ('downs', {int_or_none}),
'comment_count': ('num_comments', {int_or_none}),
}),
}
parsed_url = urllib.parse.urlparse(video_url)
@@ -371,7 +387,7 @@ class RedditIE(InfoExtractor):
**info,
})
if entries:
return self.playlist_result(entries, video_id, info.get('title'))
return self.playlist_result(entries, video_id, **info)
raise ExtractorError('No media found', expected=True)
# Check if media is hosted on reddit:

View File

@@ -12,7 +12,7 @@ from ..utils import (
)
class RedGifsBaseInfoExtractor(InfoExtractor):
class RedGifsBaseIE(InfoExtractor):
_FORMATS = {
'gif': 250,
'sd': 480,
@@ -113,7 +113,7 @@ class RedGifsBaseInfoExtractor(InfoExtractor):
return page_fetcher(page) if page else OnDemandPagedList(page_fetcher, self._PAGE_SIZE)
class RedGifsIE(RedGifsBaseInfoExtractor):
class RedGifsIE(RedGifsBaseIE):
_VALID_URL = r'https?://(?:(?:www\.)?redgifs\.com/(?:watch|ifr)/|thumbs2\.redgifs\.com/)(?P<id>[^-/?#\.]+)'
_TESTS = [{
'url': 'https://www.redgifs.com/watch/squeakyhelplesswisent',
@@ -172,7 +172,7 @@ class RedGifsIE(RedGifsBaseInfoExtractor):
return self._parse_gif_data(video_info['gif'])
class RedGifsSearchIE(RedGifsBaseInfoExtractor):
class RedGifsSearchIE(RedGifsBaseIE):
IE_DESC = 'Redgifs search'
_VALID_URL = r'https?://(?:www\.)?redgifs\.com/browse\?(?P<query>[^#]+)'
_PAGE_SIZE = 80
@@ -226,7 +226,7 @@ class RedGifsSearchIE(RedGifsBaseInfoExtractor):
entries, query_str, tags, f'RedGifs search for {tags}, ordered by {order}')
class RedGifsUserIE(RedGifsBaseInfoExtractor):
class RedGifsUserIE(RedGifsBaseIE):
IE_DESC = 'Redgifs user'
_VALID_URL = r'https?://(?:www\.)?redgifs\.com/users/(?P<username>[^/?#]+)(?:\?(?P<query>[^#]+))?'
_PAGE_SIZE = 80

43
yt_dlp/extractor/roya.py Normal file
View File

@@ -0,0 +1,43 @@
from .common import InfoExtractor
from ..utils.traversal import traverse_obj
class RoyaLiveIE(InfoExtractor):
    """Extractor for Roya TV live channels (roya.tv/live-stream/<id>)."""
    _VALID_URL = r'https?://roya\.tv/live-stream/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://roya.tv/live-stream/1',
        'info_dict': {
            'id': '1',
            'title': r're:Roya TV \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'ext': 'mp4',
            'live_status': 'is_live',
        },
    }, {
        'url': 'https://roya.tv/live-stream/21',
        'info_dict': {
            'id': '21',
            'title': r're:Roya News \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'ext': 'mp4',
            'live_status': 'is_live',
        },
    }, {
        'url': 'https://roya.tv/live-stream/10000',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        stream_url = self._download_json(
            f'https://ticket.roya-tv.com/api/v5/fastchannel/{video_id}', video_id)['data']['secured_url']
        # The schedule endpoint is only used for the channel title; it is
        # best-effort (fatal=False), so the title may end up as None
        schedule = self._download_json(
            'https://backend.roya.tv/api/v01/channels/schedule-pagination', video_id, fatal=False)
        title = traverse_obj(schedule, (
            'data', 0, 'channel', lambda _, v: str(v['id']) == video_id, 'title', {str}, any))
        formats = self._extract_m3u8_formats(stream_url, video_id, 'mp4', m3u8_id='hls', live=True)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'is_live': True,
        }

View File

@@ -3,12 +3,20 @@ import json
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import js_to_json
from .common import InfoExtractor, Request
from ..utils import (
determine_ext,
int_or_none,
js_to_json,
parse_duration,
parse_iso8601,
url_or_none,
)
from ..utils.traversal import traverse_obj
class RTPIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rtp\.pt/play/(?:(?:estudoemcasa|palco|zigzag)/)?p(?P<program_id>[0-9]+)/(?P<id>[^/?#]+)'
_VALID_URL = r'https?://(?:www\.)?rtp\.pt/play/(?:[^/#?]+/)?p(?P<program_id>\d+)/(?P<id>e\d+)'
_TESTS = [{
'url': 'http://www.rtp.pt/play/p405/e174042/paixoes-cruzadas',
'md5': 'e736ce0c665e459ddb818546220b4ef8',
@@ -16,99 +24,173 @@ class RTPIE(InfoExtractor):
'id': 'e174042',
'ext': 'mp3',
'title': 'Paixões Cruzadas',
'description': 'As paixões musicais de António Cartaxo e António Macedo',
'description': 'md5:af979e58ba0ab73f78435fc943fdb070',
'thumbnail': r're:^https?://.*\.jpg',
'series': 'Paixões Cruzadas',
'duration': 2950.0,
'modified_timestamp': 1553693464,
'modified_date': '20190327',
'timestamp': 1417219200,
'upload_date': '20141129',
},
}, {
'url': 'https://www.rtp.pt/play/zigzag/p13166/e757904/25-curiosidades-25-de-abril',
'md5': '9a81ed53f2b2197cfa7ed455b12f8ade',
'md5': '5b4859940e3adef61247a77dfb76046a',
'info_dict': {
'id': 'e757904',
'ext': 'mp4',
'title': '25 Curiosidades, 25 de Abril',
'description': 'Estudar ou não estudar - Em cada um dos episódios descobrimos uma curiosidade acerca de como era viver em Portugal antes da revolução do 25 de abr',
'title': 'Estudar ou não estudar',
'description': 'md5:3bfd7eb8bebfd5711a08df69c9c14c35',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1711958401,
'duration': 146.0,
'upload_date': '20240401',
'modified_timestamp': 1712242991,
'series': '25 Curiosidades, 25 de Abril',
'episode_number': 2,
'episode': 'Estudar ou não estudar',
'modified_date': '20240404',
},
}, {
'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas',
'only_matching': True,
}, {
'url': 'https://www.rtp.pt/play/estudoemcasa/p7776/portugues-1-ano',
'only_matching': True,
}, {
'url': 'https://www.rtp.pt/play/palco/p13785/l7nnon',
'only_matching': True,
# Episode not accessible through API
'url': 'https://www.rtp.pt/play/estudoemcasa/p7776/e500050/portugues-1-ano',
'md5': '57660c0b46db9f22118c52cbd65975e4',
'info_dict': {
'id': 'e500050',
'ext': 'mp4',
'title': 'Português - 1.º ano',
'duration': 1669.0,
'description': 'md5:be68925c81269f8c6886589f25fe83ea',
'upload_date': '20201020',
'timestamp': 1603180799,
'thumbnail': 'https://cdn-images.rtp.pt/EPG/imagens/39482_59449_64850.png?v=3&w=860',
},
}]
_USER_AGENT = 'rtpplay/2.0.66 (pt.rtp.rtpplay; build:2066; iOS 15.8.3) Alamofire/5.9.1'
_AUTH_TOKEN = None
def _fetch_auth_token(self):
if self._AUTH_TOKEN:
return self._AUTH_TOKEN
self._AUTH_TOKEN = traverse_obj(self._download_json(Request(
'https://rtpplayapi.rtp.pt/play/api/2/token-manager',
headers={
'Accept': '*/*',
'rtp-play-auth': 'RTPPLAY_MOBILE_IOS',
'rtp-play-auth-hash': 'fac9c328b2f27e26e03d7f8942d66c05b3e59371e16c2a079f5c83cc801bd3ee',
'rtp-play-auth-timestamp': '2145973229682',
'User-Agent': self._USER_AGENT,
}, extensions={'keep_header_casing': True}), None,
note='Fetching guest auth token', errnote='Could not fetch guest auth token',
fatal=False), ('token', 'token', {str}))
return self._AUTH_TOKEN
@staticmethod
def _cleanup_media_url(url):
if urllib.parse.urlparse(url).netloc == 'streaming-ondemand.rtp.pt':
return None
return url.replace('/drm-fps/', '/hls/').replace('/drm-dash/', '/dash/')
def _extract_formats(self, media_urls, episode_id):
formats = []
subtitles = {}
for media_url in set(traverse_obj(media_urls, (..., {url_or_none}, {self._cleanup_media_url}))):
ext = determine_ext(media_url)
if ext == 'm3u8':
fmts, subs = self._extract_m3u8_formats_and_subtitles(
media_url, episode_id, m3u8_id='hls', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
elif ext == 'mpd':
fmts, subs = self._extract_mpd_formats_and_subtitles(
media_url, episode_id, mpd_id='dash', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
else:
formats.append({
'url': media_url,
'format_id': 'http',
})
return formats, subtitles
def _extract_from_api(self, program_id, episode_id):
auth_token = self._fetch_auth_token()
if not auth_token:
return
episode_data = traverse_obj(self._download_json(
f'https://www.rtp.pt/play/api/1/get-episode/{program_id}/{episode_id[1:]}', episode_id,
query={'include_assets': 'true', 'include_webparams': 'true'},
headers={
'Accept': '*/*',
'Authorization': f'Bearer {auth_token}',
'User-Agent': self._USER_AGENT,
}, fatal=False), 'result', {dict})
if not episode_data:
return
asset_urls = traverse_obj(episode_data, ('assets', 0, 'asset_url', {dict}))
media_urls = traverse_obj(asset_urls, (
((('hls', 'dash'), 'stream_url'), ('multibitrate', ('url_hls', 'url_dash'))),))
formats, subtitles = self._extract_formats(media_urls, episode_id)
for sub_data in traverse_obj(asset_urls, ('subtitles', 'vtt_list', lambda _, v: url_or_none(v['file']))):
subtitles.setdefault(sub_data.get('code') or 'pt', []).append({
'url': sub_data['file'],
'name': sub_data.get('language'),
})
return {
'id': episode_id,
'formats': formats,
'subtitles': subtitles,
'thumbnail': traverse_obj(episode_data, ('assets', 0, 'asset_thumbnail', {url_or_none})),
**traverse_obj(episode_data, ('episode', {
'title': (('episode_title', 'program_title'), {str}, filter, any),
'alt_title': ('episode_subtitle', {str}, filter),
'description': (('episode_description', 'episode_summary'), {str}, filter, any),
'timestamp': ('episode_air_date', {parse_iso8601(delimiter=' ')}),
'modified_timestamp': ('episode_lastchanged', {parse_iso8601(delimiter=' ')}),
'duration': ('episode_duration_complete', {parse_duration}),
'episode': ('episode_title', {str}, filter),
'episode_number': ('episode_number', {int_or_none}),
'season': ('program_season', {str}, filter),
'series': ('program_title', {str}, filter),
})),
}
_RX_OBFUSCATION = re.compile(r'''(?xs)
atob\s*\(\s*decodeURIComponent\s*\(\s*
(\[[0-9A-Za-z%,'"]*\])
\s*\.\s*join\(\s*(?:""|'')\s*\)\s*\)\s*\)
''')
def __unobfuscate(self, data, *, video_id):
if data.startswith('{'):
data = self._RX_OBFUSCATION.sub(
lambda m: json.dumps(
base64.b64decode(urllib.parse.unquote(
''.join(self._parse_json(m.group(1), video_id)),
)).decode('iso-8859-1')),
data)
return js_to_json(data)
def __unobfuscate(self, data):
return self._RX_OBFUSCATION.sub(
lambda m: json.dumps(
base64.b64decode(urllib.parse.unquote(
''.join(json.loads(m.group(1))),
)).decode('iso-8859-1')),
data)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta(
'twitter:title', webpage, display_name='title', fatal=True)
f, config = self._search_regex(
r'''(?sx)
(?:var\s+f\s*=\s*(?P<f>".*?"|{[^;]+?});\s*)?
var\s+player1\s+=\s+new\s+RTPPlayer\s*\((?P<config>{(?:(?!\*/).)+?})\);(?!\s*\*/)
''', webpage,
'player config', group=('f', 'config'))
config = self._parse_json(
config, video_id,
lambda data: self.__unobfuscate(data, video_id=video_id))
f = config['file'] if not f else self._parse_json(
f, video_id,
lambda data: self.__unobfuscate(data, video_id=video_id))
def _extract_from_html(self, url, episode_id):
webpage = self._download_webpage(url, episode_id)
formats = []
if isinstance(f, dict):
f_hls = f.get('hls')
if f_hls is not None:
formats.extend(self._extract_m3u8_formats(
f_hls, video_id, 'mp4', 'm3u8_native', m3u8_id='hls'))
f_dash = f.get('dash')
if f_dash is not None:
formats.extend(self._extract_mpd_formats(f_dash, video_id, mpd_id='dash'))
else:
formats.append({
'format_id': 'f',
'url': f,
'vcodec': 'none' if config.get('mediaType') == 'audio' else None,
})
subtitles = {}
vtt = config.get('vtt')
if vtt is not None:
for lcode, lname, url in vtt:
subtitles.setdefault(lcode, []).append({
'name': lname,
'url': url,
})
media_urls = traverse_obj(re.findall(r'(?:var\s+f\s*=|RTPPlayer\({[^}]+file:)\s*({[^}]+}|"[^"]+")', webpage), (
-1, (({self.__unobfuscate}, {js_to_json}, {json.loads}, {dict.values}, ...), {json.loads})))
formats, subtitles = self._extract_formats(media_urls, episode_id)
return {
'id': video_id,
'title': title,
'id': episode_id,
'formats': formats,
'description': self._html_search_meta(['description', 'twitter:description'], webpage),
'thumbnail': config.get('poster') or self._og_search_thumbnail(webpage),
'subtitles': subtitles,
'description': self._html_search_meta(['og:description', 'twitter:description'], webpage, default=None),
'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None),
**self._search_json_ld(webpage, episode_id, default={}),
'title': self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None),
}
def _real_extract(self, url):
program_id, episode_id = self._match_valid_url(url).group('program_id', 'id')
return self._extract_from_api(program_id, episode_id) or self._extract_from_html(url, episode_id)

View File

@@ -9,7 +9,9 @@ from ..utils import (
class RTVSIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rtvs\.sk/(?:radio|televizia)/archiv(?:/\d+)?/(?P<id>\d+)/?(?:[#?]|$)'
IE_NAME = 'stvr'
IE_DESC = 'Slovak Television and Radio (formerly RTVS)'
_VALID_URL = r'https?://(?:www\.)?(?:rtvs|stvr)\.sk/(?:radio|televizia)/archiv(?:/\d+)?/(?P<id>\d+)/?(?:[#?]|$)'
_TESTS = [{
# radio archive
'url': 'http://www.rtvs.sk/radio/archiv/11224/414872',
@@ -19,7 +21,7 @@ class RTVSIE(InfoExtractor):
'ext': 'mp3',
'title': 'Ostrov pokladov 1 časť.mp3',
'duration': 2854,
'thumbnail': 'https://www.rtvs.sk/media/a501/image/file/2/0000/b1R8.rtvs.jpg',
'thumbnail': 'https://www.stvr.sk/media/a501/image/file/2/0000/rtvs-00009383.png',
'display_id': '135331',
},
}, {
@@ -30,7 +32,7 @@ class RTVSIE(InfoExtractor):
'ext': 'mp4',
'title': 'Amaro Džives - Náš deň',
'description': 'Galavečer pri príležitosti Medzinárodného dňa Rómov.',
'thumbnail': 'https://www.rtvs.sk/media/a501/image/file/2/0031/L7Qm.amaro_dzives_png.jpg',
'thumbnail': 'https://www.stvr.sk/media/a501/image/file/2/0031/L7Qm.amaro_dzives_png.jpg',
'timestamp': 1428555900,
'upload_date': '20150409',
'duration': 4986,
@@ -47,8 +49,11 @@ class RTVSIE(InfoExtractor):
'display_id': '307655',
'duration': 831,
'upload_date': '20211111',
'thumbnail': 'https://www.rtvs.sk/media/a501/image/file/2/0916/robin.jpg',
'thumbnail': 'https://www.stvr.sk/media/a501/image/file/2/0916/robin.jpg',
},
}, {
'url': 'https://www.stvr.sk/radio/archiv/11224/414872',
'only_matching': True,
}]
def _real_extract(self, url):

View File

@@ -7,7 +7,6 @@ from ..utils import (
ExtractorError,
UnsupportedError,
clean_html,
determine_ext,
extract_attributes,
format_field,
get_element_by_class,
@@ -36,7 +35,7 @@ class RumbleEmbedIE(InfoExtractor):
'upload_date': '20191020',
'channel_url': 'https://rumble.com/c/WMAR',
'channel': 'WMAR',
'thumbnail': 'https://sp.rmbl.ws/s8/1/5/M/z/1/5Mz1a.qR4e-small-WMAR-2-News-Latest-Headline.jpg',
'thumbnail': r're:https://.+\.jpg',
'duration': 234,
'uploader': 'WMAR',
'live_status': 'not_live',
@@ -52,7 +51,7 @@ class RumbleEmbedIE(InfoExtractor):
'upload_date': '20220217',
'channel_url': 'https://rumble.com/c/CyberTechNews',
'channel': 'CTNews',
'thumbnail': 'https://sp.rmbl.ws/s8/6/7/i/9/h/7i9hd.OvCc.jpg',
'thumbnail': r're:https://.+\.jpg',
'duration': 901,
'uploader': 'CTNews',
'live_status': 'not_live',
@@ -114,6 +113,22 @@ class RumbleEmbedIE(InfoExtractor):
'live_status': 'was_live',
},
'params': {'skip_download': True},
}, {
'url': 'https://rumble.com/embed/v6pezdb',
'info_dict': {
'id': 'v6pezdb',
'ext': 'mp4',
'title': '"Es war einmal ein Mädchen" Ein filmisches Zeitzeugnis aus Leningrad 1944',
'uploader': 'RT DE',
'channel': 'RT DE',
'channel_url': 'https://rumble.com/c/RTDE',
'duration': 309,
'thumbnail': 'https://1a-1791.com/video/fww1/dc/s8/1/n/z/2/y/nz2yy.qR4e-small-Es-war-einmal-ein-Mdchen-Ei.jpg',
'timestamp': 1743703500,
'upload_date': '20250403',
'live_status': 'not_live',
},
'params': {'skip_download': True},
}, {
'url': 'https://rumble.com/embed/ufe9n.v5pv5f',
'only_matching': True,
@@ -168,40 +183,42 @@ class RumbleEmbedIE(InfoExtractor):
live_status = None
formats = []
for ext, ext_info in (video.get('ua') or {}).items():
if isinstance(ext_info, dict):
for height, video_info in ext_info.items():
for format_type, format_info in (video.get('ua') or {}).items():
if isinstance(format_info, dict):
for height, video_info in format_info.items():
if not traverse_obj(video_info, ('meta', 'h', {int_or_none})):
video_info.setdefault('meta', {})['h'] = height
ext_info = ext_info.values()
format_info = format_info.values()
for video_info in ext_info:
for video_info in format_info:
meta = video_info.get('meta') or {}
if not video_info.get('url'):
continue
if ext == 'hls':
# With default query params returns m3u8 variants which are duplicates, without returns tar files
if format_type == 'tar':
continue
if format_type == 'hls':
if meta.get('live') is True and video.get('live') == 1:
live_status = 'post_live'
formats.extend(self._extract_m3u8_formats(
video_info['url'], video_id,
ext='mp4', m3u8_id='hls', fatal=False, live=live_status == 'is_live'))
continue
timeline = ext == 'timeline'
if timeline:
ext = determine_ext(video_info['url'])
is_timeline = format_type == 'timeline'
is_audio = format_type == 'audio'
formats.append({
'ext': ext,
'acodec': 'none' if timeline else None,
'acodec': 'none' if is_timeline else None,
'vcodec': 'none' if is_audio else None,
'url': video_info['url'],
'format_id': join_nonempty(ext, format_field(meta, 'h', '%sp')),
'format_note': 'Timeline' if timeline else None,
'fps': None if timeline else video.get('fps'),
'format_id': join_nonempty(format_type, format_field(meta, 'h', '%sp')),
'format_note': 'Timeline' if is_timeline else None,
'fps': None if is_timeline or is_audio else video.get('fps'),
**traverse_obj(meta, {
'tbr': 'bitrate',
'filesize': 'size',
'width': 'w',
'height': 'h',
}, expected_type=lambda x: int(x) or None),
'tbr': ('bitrate', {int_or_none}),
'filesize': ('size', {int_or_none}),
'width': ('w', {int_or_none}),
'height': ('h', {int_or_none}),
}),
})
subtitles = {

View File

@@ -122,6 +122,15 @@ class SBSIE(InfoExtractor):
if traverse_obj(media, ('partOfSeries', {dict})):
media['epName'] = traverse_obj(media, ('title', {str}))
# Need to set different language for forced subs or else they have priority over full subs
fixed_subtitles = {}
for lang, subs in subtitles.items():
for sub in subs:
fixed_lang = lang
if sub['url'].lower().endswith('_fe.vtt'):
fixed_lang += '-forced'
fixed_subtitles.setdefault(fixed_lang, []).append(sub)
return {
'id': video_id,
**traverse_obj(media, {
@@ -151,6 +160,6 @@ class SBSIE(InfoExtractor):
}),
}),
'formats': formats,
'subtitles': subtitles,
'subtitles': fixed_subtitles,
'uploader': 'SBSC',
}

View File

@@ -13,7 +13,7 @@ from ..utils.traversal import traverse_obj
class SenateISVPIE(InfoExtractor):
_IE_NAME = 'senate.gov:isvp'
IE_NAME = 'senate.gov:isvp'
_VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
_EMBED_REGEX = [r"<iframe[^>]+src=['\"](?P<url>https?://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]"]
@@ -137,7 +137,7 @@ class SenateISVPIE(InfoExtractor):
class SenateGovIE(InfoExtractor):
_IE_NAME = 'senate.gov'
IE_NAME = 'senate.gov'
_SUBDOMAIN_RE = '|'.join(map(re.escape, (
'agriculture', 'aging', 'appropriations', 'armed-services', 'banking',
'budget', 'commerce', 'energy', 'epw', 'finance', 'foreign', 'help',

View File

@@ -2,16 +2,18 @@ import urllib.parse
from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
int_or_none,
parse_duration,
unified_timestamp,
url_or_none,
urljoin,
)
from ..utils.traversal import traverse_obj
class SkyItPlayerIE(InfoExtractor):
IE_NAME = 'player.sky.it'
_VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
class SkyItBaseIE(InfoExtractor):
_GEO_BYPASS = False
_DOMAIN = 'sky'
_PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
@@ -33,7 +35,6 @@ class SkyItPlayerIE(InfoExtractor):
SkyItPlayerIE.ie_key(), video_id)
def _parse_video(self, video, video_id):
title = video['title']
is_live = video.get('type') == 'live'
hls_url = video.get(('streaming' if is_live else 'hls') + '_url')
if not hls_url and video.get('geoblock' if is_live else 'geob'):
@@ -43,7 +44,7 @@ class SkyItPlayerIE(InfoExtractor):
return {
'id': video_id,
'title': title,
'title': video.get('title'),
'formats': formats,
'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')),
'description': video.get('short_desc') or None,
@@ -52,6 +53,11 @@ class SkyItPlayerIE(InfoExtractor):
'is_live': is_live,
}
class SkyItPlayerIE(SkyItBaseIE):
IE_NAME = 'player.sky.it'
_VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
def _real_extract(self, url):
video_id = self._match_id(url)
domain = urllib.parse.parse_qs(urllib.parse.urlparse(
@@ -67,7 +73,7 @@ class SkyItPlayerIE(InfoExtractor):
return self._parse_video(video, video_id)
class SkyItVideoIE(SkyItPlayerIE): # XXX: Do not subclass from concrete IE
class SkyItVideoIE(SkyItBaseIE):
IE_NAME = 'video.sky.it'
_VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
_TESTS = [{
@@ -96,7 +102,7 @@ class SkyItVideoIE(SkyItPlayerIE): # XXX: Do not subclass from concrete IE
return self._player_url_result(video_id)
class SkyItVideoLiveIE(SkyItPlayerIE): # XXX: Do not subclass from concrete IE
class SkyItVideoLiveIE(SkyItBaseIE):
IE_NAME = 'video.sky.it:live'
_VALID_URL = r'https?://video\.sky\.it/diretta/(?P<id>[^/?&#]+)'
_TEST = {
@@ -124,7 +130,7 @@ class SkyItVideoLiveIE(SkyItPlayerIE): # XXX: Do not subclass from concrete IE
return self._parse_video(livestream, asset_id)
class SkyItIE(SkyItPlayerIE): # XXX: Do not subclass from concrete IE
class SkyItIE(SkyItBaseIE):
IE_NAME = 'sky.it'
_VALID_URL = r'https?://(?:sport|tg24)\.sky\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
_TESTS = [{
@@ -223,3 +229,80 @@ class TV8ItIE(SkyItVideoIE): # XXX: Do not subclass from concrete IE
'params': {'skip_download': 'm3u8'},
}]
_DOMAIN = 'mtv8'
class TV8ItLiveIE(SkyItBaseIE):
IE_NAME = 'tv8.it:live'
IE_DESC = 'TV8 Live'
_VALID_URL = r'https?://(?:www\.)?tv8\.it/streaming'
_TESTS = [{
'url': 'https://tv8.it/streaming',
'info_dict': {
'id': 'tv8',
'ext': 'mp4',
'title': str,
'description': str,
'is_live': True,
'live_status': 'is_live',
},
}]
def _real_extract(self, url):
video_id = 'tv8'
livestream = self._download_json(
'https://apid.sky.it/vdp/v1/getLivestream', video_id,
'Downloading manifest JSON', query={'id': '7'})
metadata = self._download_json('https://tv8.it/api/getStreaming', video_id, fatal=False)
return {
**self._parse_video(livestream, video_id),
**traverse_obj(metadata, ('info', {
'title': ('title', 'text', {str}),
'description': ('description', 'html', {clean_html}),
})),
}
class TV8ItPlaylistIE(InfoExtractor):
    """Extractor for TV8 show pages, yielding one entry per episode card."""
    IE_NAME = 'tv8.it:playlist'
    IE_DESC = 'TV8 Playlist'
    _VALID_URL = r'https?://(?:www\.)?tv8\.it/(?!video)[^/#?]+/(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'https://tv8.it/intrattenimento/tv8-gialappas-night',
        'playlist_mincount': 32,
        'info_dict': {
            'id': 'tv8-gialappas-night',
            'title': 'Tv8 Gialappa\'s Night',
            'description': 'md5:c876039d487d9cf40229b768872718ed',
            'thumbnail': r're:https://static\.sky\.it/.+\.(png|jpe?g|webp)',
        },
    }, {
        'url': 'https://tv8.it/sport/uefa-europa-league',
        'playlist_mincount': 11,
        'info_dict': {
            'id': 'uefa-europa-league',
            'title': 'UEFA Europa League',
            'description': 'md5:9ab1832b7a8b1705b1f590e13a36bc6a',
            'thumbnail': r're:https://static\.sky\.it/.+\.(png|jpe?g|webp)',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # Episode cards live in the Next.js page payload
        page_data = self._search_nextjs_data(webpage, playlist_id)['props']['pageProps']['data']

        entries = []
        for card in traverse_obj(page_data, ('lastContent', 'cards', lambda _, v: v['href'])):
            entries.append(self.url_result(
                urljoin('https://tv8.it', card['href']), ie=TV8ItIE,
                **traverse_obj(card, {
                    'description': ('extraData', 'videoDesc', {str}),
                    'id': ('extraData', 'asset_id', {str}),
                    'thumbnail': ('image', 'src', {url_or_none}),
                    'title': ('title', 'typography', 'text', {str}),
                })))

        playlist_meta = traverse_obj(page_data, ('card', 'desktop', {
            'description': ('description', 'html', {clean_html}),
            'thumbnail': ('image', 'src', {url_or_none}),
            'title': ('title', 'text', {str}),
        }))
        return self.playlist_result(entries, playlist_id, **playlist_meta)

236
yt_dlp/extractor/streaks.py Normal file
View File

@@ -0,0 +1,236 @@
import json
import urllib.parse
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
filter_dict,
float_or_none,
join_nonempty,
mimetype2ext,
parse_iso8601,
unsmuggle_url,
update_url_query,
url_or_none,
)
from ..utils.traversal import traverse_obj
class StreaksBaseIE(InfoExtractor):
    """Base class for extractors backed by the STREAKS (streaks.jp) playback API.

    Site-specific extractors (e.g. broadcaster front-ends) subclass this and
    call _extract_from_streaks_api() with their project/media IDs.
    """
    # Positional fields: API subdomain, project ID, media ID, path suffix
    _API_URL_TEMPLATE = 'https://{}.api.streaks.jp/v1/projects/{}/medias/{}{}'
    _GEO_BYPASS = False
    _GEO_COUNTRIES = ['JP']

    def _extract_from_streaks_api(self, project_id, media_id, headers=None, query=None, ssai=False):
        """Fetch media info from the STREAKS playback API and build an info dict.

        @param project_id   STREAKS project (tenant) identifier
        @param media_id     media ID, optionally "ref:"-prefixed
        @param headers      extra request headers (e.g. API key, Origin/Referer)
        @param query        extra query parameters for the m3u8 manifest request
        @param ssai         if True, fetch server-side ad-insertion session
                            parameters for live streams
        @raises ExtractorError  on geo-restriction, missing media, or other
                                API-reported failures
        """
        try:
            response = self._download_json(
                self._API_URL_TEMPLATE.format('playback', project_id, media_id, ''),
                media_id, 'Downloading STREAKS playback API JSON', headers={
                    'Accept': 'application/json',
                    'Origin': 'https://players.streaks.jp',
                    **self.geo_verification_headers(),
                    **(headers or {}),
                })
        except ExtractorError as e:
            # The API reports geo-restriction and missing media via 403/404
            # with a JSON body containing "code" and "message"
            if isinstance(e.cause, HTTPError) and e.cause.status in {403, 404}:
                error = self._parse_json(e.cause.response.read().decode(), media_id, fatal=False)
                message = traverse_obj(error, ('message', {str}))
                code = traverse_obj(error, ('code', {str}))
                if code == 'REQUEST_FAILED':
                    self.raise_geo_restricted(message, countries=self._GEO_COUNTRIES)
                elif code == 'MEDIA_NOT_FOUND':
                    raise ExtractorError(message, expected=True)
                elif code or message:
                    raise ExtractorError(join_nonempty(code, message, delim=': '))
            raise

        streaks_id = response['id']
        # Map the API's media "type" to yt-dlp live_status values
        live_status = {
            'clip': 'was_live',
            'file': 'not_live',
            'linear': 'is_live',
            'live': 'is_live',
        }.get(response.get('type'))

        formats, subtitles = [], {}
        drm_formats = False
        for source in traverse_obj(response, ('sources', lambda _, v: v['src'])):
            # Skip DRM-protected sources but remember them so we only report
            # DRM when no playable format remains
            if source.get('key_systems'):
                drm_formats = True
                continue
            src_url = source['src']
            is_live = live_status == 'is_live'
            ext = mimetype2ext(source.get('type'))
            # Only HLS manifests are handled here
            if ext != 'm3u8':
                self.report_warning(f'Unsupported stream type: {ext}')
                continue
            if is_live and ssai:
                # SSAI live streams need session-specific query parameters
                # obtained from a separate session endpoint
                session_params = traverse_obj(self._download_json(
                    self._API_URL_TEMPLATE.format('ssai', project_id, streaks_id, '/ssai/session'),
                    media_id, 'Downloading session parameters',
                    headers={'Content-Type': 'application/json', 'Accept': 'application/json'},
                    data=json.dumps({'id': source['id']}).encode(),
                ), (0, 'query', {urllib.parse.parse_qs}))
                src_url = update_url_query(src_url, session_params)
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                src_url, media_id, 'mp4', m3u8_id='hls', fatal=False, live=is_live, query=query)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        if not formats and drm_formats:
            self.report_drm(media_id)
        self._remove_duplicate_formats(formats)

        # External subtitle tracks are listed separately from the manifests
        for subs in traverse_obj(response, (
            'tracks', lambda _, v: v['kind'] in ('captions', 'subtitles') and url_or_none(v['src']),
        )):
            # Fall back to Japanese when the track language is not declared
            lang = traverse_obj(subs, ('srclang', {str.lower})) or 'ja'
            subtitles.setdefault(lang, []).append({'url': subs['src']})

        return {
            'id': streaks_id,
            'display_id': media_id,
            'formats': formats,
            'live_status': live_status,
            'subtitles': subtitles,
            'uploader_id': project_id,
            **traverse_obj(response, {
                'title': ('name', {str}),
                'description': ('description', {str}, filter),
                'duration': ('duration', {float_or_none}),
                'modified_timestamp': ('updated_at', {parse_iso8601}),
                'tags': ('tags', ..., {str}),
                'thumbnails': (('poster', 'thumbnail'), 'src', {'url': {url_or_none}}),
                'timestamp': ('created_at', {parse_iso8601}),
            }),
        }
class StreaksIE(StreaksBaseIE):
    """Extractor for STREAKS player and direct playback-API URLs."""
    _VALID_URL = [
        r'https?://players\.streaks\.jp/(?P<project_id>[\w-]+)/[\da-f]+/index\.html\?(?:[^#]+&)?m=(?P<id>(?:ref:)?[\w-]+)',
        r'https?://playback\.api\.streaks\.jp/v1/projects/(?P<project_id>[\w-]+)/medias/(?P<id>(?:ref:)?[\w-]+)',
    ]
    _EMBED_REGEX = [rf'<iframe\s+[^>]*\bsrc\s*=\s*["\'](?P<url>{_VALID_URL[0]})']
    _TESTS = [{
        'url': 'https://players.streaks.jp/tipness/08155cd19dc14c12bebefb69b92eafcc/index.html?m=dbdf2df35b4d483ebaeeaeb38c594647',
        'info_dict': {
            'id': 'dbdf2df35b4d483ebaeeaeb38c594647',
            'ext': 'mp4',
            'title': '3shunenCM_edit.mp4',
            'display_id': 'dbdf2df35b4d483ebaeeaeb38c594647',
            'duration': 47.533,
            'live_status': 'not_live',
            'modified_date': '20230726',
            'modified_timestamp': 1690356180,
            'timestamp': 1690355996,
            'upload_date': '20230726',
            'uploader_id': 'tipness',
        },
    }, {
        'url': 'https://players.streaks.jp/ktv-web/0298e8964c164ab384c07ef6e08c444b/index.html?m=ref:mycoffeetime_250317',
        'info_dict': {
            'id': 'dccdc079e3fd41f88b0c8435e2d453ab',
            'ext': 'mp4',
            'title': 'わたしの珈琲時間_250317',
            'display_id': 'ref:mycoffeetime_250317',
            'duration': 122.99,
            'live_status': 'not_live',
            'modified_date': '20250310',
            'modified_timestamp': 1741586302,
            'thumbnail': r're:https?://.+\.jpg',
            'timestamp': 1741585839,
            'upload_date': '20250310',
            'uploader_id': 'ktv-web',
        },
    }, {
        'url': 'https://playback.api.streaks.jp/v1/projects/ktv-web/medias/b5411938e1e5435dac71edf829dd4813',
        'info_dict': {
            'id': 'b5411938e1e5435dac71edf829dd4813',
            'ext': 'mp4',
            'title': 'KANTELE_SYUSEi_0630',
            'display_id': 'b5411938e1e5435dac71edf829dd4813',
            'live_status': 'not_live',
            'modified_date': '20250122',
            'modified_timestamp': 1737522999,
            'thumbnail': r're:https?://.+\.jpg',
            'timestamp': 1735205137,
            'upload_date': '20241226',
            'uploader_id': 'ktv-web',
        },
    }, {
        # TVer Olympics: website already down, but api remains accessible
        'url': 'https://playback.api.streaks.jp/v1/projects/tver-olympic/medias/ref:sp_240806_1748_dvr',
        'info_dict': {
            'id': 'c10f7345adb648cf804d7578ab93b2e3',
            'ext': 'mp4',
            'title': 'サッカー 男子 準決勝_dvr',
            'display_id': 'ref:sp_240806_1748_dvr',
            'duration': 12960.0,
            'live_status': 'was_live',
            'modified_date': '20240805',
            'modified_timestamp': 1722896263,
            'timestamp': 1722777618,
            'upload_date': '20240804',
            'uploader_id': 'tver-olympic',
        },
    }, {
        # TBS FREE: 24-hour stream
        'url': 'https://playback.api.streaks.jp/v1/projects/tbs/medias/ref:simul-02',
        'info_dict': {
            'id': 'c4e83a7b48f4409a96adacec674b4e22',
            'ext': 'mp4',
            'title': str,
            'display_id': 'ref:simul-02',
            'live_status': 'is_live',
            'modified_date': '20241031',
            'modified_timestamp': 1730339858,
            'timestamp': 1705466840,
            'upload_date': '20240117',
            'uploader_id': 'tbs',
        },
    }, {
        # DRM protected
        'url': 'https://players.streaks.jp/sp-jbc/a12d7ee0f40c49d6a0a2bff520639677/index.html?m=5f89c62f37ee4a68be8e6e3b1396c7d8',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://event.play.jp/playnext2023/',
        'info_dict': {
            'id': '2d975178293140dc8074a7fc536a7604',
            'ext': 'mp4',
            'title': 'PLAY NEXTキームービー(本番)',
            'uploader_id': 'play',
            'duration': 17.05,
            'thumbnail': r're:https?://.+\.jpg',
            'timestamp': 1668387517,
            'upload_date': '20221114',
            'modified_timestamp': 1739411523,
            'modified_date': '20250213',
            'live_status': 'not_live',
        },
    }, {
        'url': 'https://wowshop.jp/Page/special/cooking_goods/?bid=wowshop&srsltid=AfmBOor_phUNoPEE_UCPiGGSCMrJE5T2US397smvsbrSdLqUxwON0el4',
        'playlist_mincount': 2,
        'info_dict': {
            'id': '?bid=wowshop&srsltid=AfmBOor_phUNoPEE_UCPiGGSCMrJE5T2US397smvsbrSdLqUxwON0el4',
            'title': 'ワンランク上の料理道具でとびきりの“おいしい”を食卓へ|wowshop',
            'description': 'md5:914b5cb8624fc69274c7fb7b2342958f',
            'age_limit': 0,
            'thumbnail': 'https://wowshop.jp/Page/special/cooking_goods/images/ogp.jpg',
        },
    }]

    def _real_extract(self, url):
        # Embedding sites may smuggle an API key needed by the playback API
        url, smuggled_data = unsmuggle_url(url, {})
        project_id, media_id = self._match_valid_url(url).group('project_id', 'id')

        return self._extract_from_streaks_api(
            project_id, media_id, headers=filter_dict({
                'X-Streaks-Api-Key': smuggled_data.get('api_key'),
            }))

View File

@@ -191,12 +191,12 @@ class TapTapAppIE(TapTapBaseIE):
}]
class TapTapIntlBase(TapTapBaseIE):
class TapTapIntlBaseIE(TapTapBaseIE):
_X_UA = 'V=1&PN=WebAppIntl2&LANG=zh_TW&VN_CODE=115&VN=0.1.0&LOC=CN&PLT=PC&DS=Android&UID={uuid}&CURR=&DT=PC&OS=Windows&OSV=NT%208.0.0'
_VIDEO_API = 'https://www.taptap.io/webapiv2/video-resource/v1/multi-get'
class TapTapAppIntlIE(TapTapIntlBase):
class TapTapAppIntlIE(TapTapIntlBaseIE):
_VALID_URL = r'https?://www\.taptap\.io/app/(?P<id>\d+)'
_INFO_API = 'https://www.taptap.io/webapiv2/i/app/v5/detail'
_DATA_PATH = 'app'
@@ -227,7 +227,7 @@ class TapTapAppIntlIE(TapTapIntlBase):
}]
class TapTapPostIntlIE(TapTapIntlBase):
class TapTapPostIntlIE(TapTapIntlBaseIE):
_VALID_URL = r'https?://www\.taptap\.io/post/(?P<id>\d+)'
_INFO_API = 'https://www.taptap.io/webapiv2/creation/post/v1/detail'
_INFO_QUERY_KEY = 'id_str'

View File

@@ -46,7 +46,7 @@ class TelecincoBaseIE(InfoExtractor):
error_code = traverse_obj(
self._webpage_read_content(error.cause.response, caronte['cerbero'], video_id, fatal=False),
({json.loads}, 'code', {int}))
if error_code == 4038:
if error_code in (4038, 40313):
self.raise_geo_restricted(countries=['ES'])
raise

View File

@@ -26,6 +26,7 @@ from ..utils import (
srt_subtitles_timecode,
str_or_none,
traverse_obj,
truncate_string,
try_call,
try_get,
url_or_none,
@@ -444,7 +445,7 @@ class TikTokBaseIE(InfoExtractor):
return {
'id': aweme_id,
**traverse_obj(aweme_detail, {
'title': ('desc', {str}),
'title': ('desc', {truncate_string(left=72)}),
'description': ('desc', {str}),
'timestamp': ('create_time', {int_or_none}),
}),
@@ -595,7 +596,7 @@ class TikTokBaseIE(InfoExtractor):
'duration': ('duration', {int_or_none}),
})),
**traverse_obj(aweme_detail, {
'title': ('desc', {str}),
'title': ('desc', {truncate_string(left=72)}),
'description': ('desc', {str}),
# audio-only slideshows have a video duration of 0 and an actual audio duration
'duration': ('video', 'duration', {int_or_none}, filter),
@@ -656,7 +657,7 @@ class TikTokIE(TikTokBaseIE):
'info_dict': {
'id': '6742501081818877190',
'ext': 'mp4',
'title': 'md5:5e2a23877420bb85ce6521dbee39ba94',
'title': 'Tag 1 Friend reverse this Video and look what happens 🤩😱 @skyandtami ...',
'description': 'md5:5e2a23877420bb85ce6521dbee39ba94',
'duration': 27,
'height': 1024,
@@ -860,7 +861,7 @@ class TikTokIE(TikTokBaseIE):
'info_dict': {
'id': '7253412088251534594',
'ext': 'm4a',
'title': 'я ред флаг простите #переписка #щитпост #тревожныйтиппривязанности #рекомендации ',
'title': 'я ред флаг простите #переписка #щитпост #тревожныйтиппривязанности #р...',
'description': 'я ред флаг простите #переписка #щитпост #тревожныйтиппривязанности #рекомендации ',
'uploader': 'hara_yoimiya',
'uploader_id': '6582536342634676230',

View File

@@ -1,31 +1,70 @@
from .common import InfoExtractor
from .streaks import StreaksBaseIE
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
make_archive_id,
smuggle_url,
str_or_none,
strip_or_none,
traverse_obj,
update_url_query,
)
from ..utils.traversal import require, traverse_obj
class TVerIE(InfoExtractor):
class TVerIE(StreaksBaseIE):
_VALID_URL = r'https?://(?:www\.)?tver\.jp/(?:(?P<type>lp|corner|series|episodes?|feature)/)+(?P<id>[a-zA-Z0-9]+)'
_GEO_COUNTRIES = ['JP']
_GEO_BYPASS = False
_TESTS = [{
'skip': 'videos are only available for 7 days',
'url': 'https://tver.jp/episodes/ep83nf3w4p',
# via Streaks backend
'url': 'https://tver.jp/episodes/epc1hdugbk',
'info_dict': {
'title': '家事ヤロウ!!! 売り場席巻のチーズSP財前直見×森泉親子の脱東京暮らし密着',
'description': 'md5:dc2c06b6acc23f1e7c730c513737719b',
'series': '家事ヤロウ!!!',
'episode': '売り場席巻のチーズSP財前直見×森泉親子の脱東京暮らし密着',
'alt_title': '売り場席巻のチーズSP財前直見×森泉親子の脱東京暮らし密着',
'channel': 'テレビ朝日',
'id': 'ep83nf3w4p',
'id': 'epc1hdugbk',
'ext': 'mp4',
'display_id': 'ref:baeebeac-a2a6-4dbf-9eb3-c40d59b40068',
'title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル)',
'alt_title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル) 日テレ',
'description': 'md5:2726f742d5e3886edeaf72fb6d740fef',
'uploader_id': 'tver-ntv',
'channel': '日テレ',
'duration': 1158.024,
'thumbnail': 'https://statics.tver.jp/images/content/thumbnail/episode/xlarge/epc1hdugbk.jpg?v=16',
'series': '神回だけ見せます!',
'episode': '#2 壮烈!車大騎馬戦(木曜スペシャル)',
'episode_number': 2,
'timestamp': 1736486036,
'upload_date': '20250110',
'modified_timestamp': 1736870264,
'modified_date': '20250114',
'live_status': 'not_live',
'release_timestamp': 1651453200,
'release_date': '20220502',
'_old_archive_ids': ['brightcovenew ref:baeebeac-a2a6-4dbf-9eb3-c40d59b40068'],
},
'add_ie': ['BrightcoveNew'],
}, {
# via Brightcove backend (deprecated)
'url': 'https://tver.jp/episodes/epc1hdugbk',
'info_dict': {
'id': 'ref:baeebeac-a2a6-4dbf-9eb3-c40d59b40068',
'ext': 'mp4',
'title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル)',
'alt_title': '神回だけ見せます! #2 壮烈!車大騎馬戦(木曜スペシャル) 日テレ',
'description': 'md5:2726f742d5e3886edeaf72fb6d740fef',
'uploader_id': '4394098882001',
'channel': '日テレ',
'duration': 1158.101,
'thumbnail': 'https://statics.tver.jp/images/content/thumbnail/episode/xlarge/epc1hdugbk.jpg?v=16',
'tags': [],
'series': '神回だけ見せます!',
'episode': '#2 壮烈!車大騎馬戦(木曜スペシャル)',
'episode_number': 2,
'timestamp': 1651388531,
'upload_date': '20220501',
'release_timestamp': 1651453200,
'release_date': '20220502',
},
'params': {'extractor_args': {'tver': {'backend': ['brightcove']}}},
}, {
'url': 'https://tver.jp/corner/f0103888',
'only_matching': True,
@@ -38,26 +77,7 @@ class TVerIE(InfoExtractor):
'id': 'srtxft431v',
'title': '名探偵コナン',
},
'playlist': [
{
'md5': '779ffd97493ed59b0a6277ea726b389e',
'info_dict': {
'id': 'ref:conan-1137-241005',
'ext': 'mp4',
'title': '名探偵コナン #1137「行列店、味変の秘密」',
'uploader_id': '5330942432001',
'tags': [],
'channel': '読売テレビ',
'series': '名探偵コナン',
'description': 'md5:601fccc1d2430d942a2c8068c4b33eb5',
'episode': '#1137「行列店、味変の秘密」',
'duration': 1469.077,
'timestamp': 1728030405,
'upload_date': '20241004',
'alt_title': '名探偵コナン #1137「行列店、味変の秘密」 読売テレビ 10月5日(土)放送分',
'thumbnail': r're:https://.+\.jpg',
},
}],
'playlist_mincount': 21,
}, {
'url': 'https://tver.jp/series/sru35hwdd2',
'info_dict': {
@@ -70,7 +90,11 @@ class TVerIE(InfoExtractor):
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
_HEADERS = {'x-tver-platform-type': 'web'}
_HEADERS = {
'x-tver-platform-type': 'web',
'Origin': 'https://tver.jp',
'Referer': 'https://tver.jp/',
}
_PLATFORM_QUERY = {}
def _real_initialize(self):
@@ -103,6 +127,9 @@ class TVerIE(InfoExtractor):
def _real_extract(self, url):
video_id, video_type = self._match_valid_url(url).group('id', 'type')
backend = self._configuration_arg('backend', ['streaks'])[0]
if backend not in ('brightcove', 'streaks'):
raise ExtractorError(f'Invalid backend value: {backend}', expected=True)
if video_type == 'series':
series_info = self._call_platform_api(
@@ -129,12 +156,6 @@ class TVerIE(InfoExtractor):
video_info = self._download_json(
f'https://statics.tver.jp/content/episode/{video_id}.json', video_id, 'Downloading video info',
query={'v': version}, headers={'Referer': 'https://tver.jp/'})
p_id = video_info['video']['accountID']
r_id = traverse_obj(video_info, ('video', ('videoRefID', 'videoID')), get_all=False)
if not r_id:
raise ExtractorError('Failed to extract reference ID for Brightcove')
if not r_id.isdigit():
r_id = f'ref:{r_id}'
episode = strip_or_none(episode_content.get('title'))
series = str_or_none(episode_content.get('seriesTitle'))
@@ -161,17 +182,53 @@ class TVerIE(InfoExtractor):
]
]
return {
'_type': 'url_transparent',
metadata = {
'title': title,
'series': series,
'episode': episode,
# an another title which is considered "full title" for some viewers
'alt_title': join_nonempty(title, provider, onair_label, delim=' '),
'channel': provider,
'description': str_or_none(video_info.get('description')),
'thumbnails': thumbnails,
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id), {'geo_countries': ['JP']}),
'ie_key': 'BrightcoveNew',
**traverse_obj(video_info, {
'description': ('description', {str}),
'release_timestamp': ('viewStatus', 'startAt', {int_or_none}),
'episode_number': ('no', {int_or_none}),
}),
}
brightcove_id = traverse_obj(video_info, ('video', ('videoRefID', 'videoID'), {str}, any))
if brightcove_id and not brightcove_id.isdecimal():
brightcove_id = f'ref:{brightcove_id}'
streaks_id = traverse_obj(video_info, ('streaks', 'videoRefID', {str}))
if streaks_id and not streaks_id.startswith('ref:'):
streaks_id = f'ref:{streaks_id}'
# Deprecated Brightcove extraction reachable w/extractor-arg or fallback; errors are expected
if backend == 'brightcove' or not streaks_id:
if backend != 'brightcove':
self.report_warning(
'No STREAKS ID found; falling back to Brightcove extraction', video_id=video_id)
if not brightcove_id:
raise ExtractorError('Unable to extract brightcove reference ID', expected=True)
account_id = traverse_obj(video_info, (
'video', 'accountID', {str}, {require('brightcove account ID', expected=True)}))
return {
**metadata,
'_type': 'url_transparent',
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % (account_id, brightcove_id),
{'geo_countries': ['JP']}),
'ie_key': 'BrightcoveNew',
}
return {
**self._extract_from_streaks_api(video_info['streaks']['projectID'], streaks_id, {
'Origin': 'https://tver.jp',
'Referer': 'https://tver.jp/',
}),
**metadata,
'id': video_id,
'_old_archive_ids': [make_archive_id('BrightcoveNew', brightcove_id)] if brightcove_id else None,
}

117
yt_dlp/extractor/tvw.py Normal file
View File

@@ -0,0 +1,117 @@
import json
from .common import InfoExtractor
from ..utils import clean_html, remove_end, unified_timestamp, url_or_none
from ..utils.traversal import traverse_obj
class TvwIE(InfoExtractor):
    """Extractor for TVW (Washington State public affairs network) videos.

    Video pages embed an Invintus player; the client/event IDs are read from
    the page's meta tags and resolved via the Invintus API.
    """
    _VALID_URL = r'https?://(?:www\.)?tvw\.org/video/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'https://tvw.org/video/billy-frank-jr-statue-maquette-unveiling-ceremony-2024011211/',
        'md5': '9ceb94fe2bb7fd726f74f16356825703',
        'info_dict': {
            'id': '2024011211',
            'ext': 'mp4',
            'title': 'Billy Frank Jr. Statue Maquette Unveiling Ceremony',
            'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$',
            'description': 'md5:58a8150017d985b4f377e11ee8f6f36e',
            'timestamp': 1704902400,
            'upload_date': '20240110',
            'location': 'Legislative Building',
            'display_id': 'billy-frank-jr-statue-maquette-unveiling-ceremony-2024011211',
            'categories': ['General Interest'],
        },
    }, {
        'url': 'https://tvw.org/video/ebeys-landing-state-park-2024081007/',
        'md5': '71e87dae3deafd65d75ff3137b9a32fc',
        'info_dict': {
            'id': '2024081007',
            'ext': 'mp4',
            'title': 'Ebey\'s Landing State Park',
            'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$',
            'description': 'md5:50c5bd73bde32fa6286a008dbc853386',
            'timestamp': 1724310900,
            'upload_date': '20240822',
            'location': 'Ebey’s Landing State Park',
            'display_id': 'ebeys-landing-state-park-2024081007',
            'categories': ['Washington State Parks'],
        },
    }, {
        'url': 'https://tvw.org/video/home-warranties-workgroup-2',
        'md5': 'f678789bf94d07da89809f213cf37150',
        'info_dict': {
            'id': '1999121000',
            'ext': 'mp4',
            'title': 'Home Warranties Workgroup',
            'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$',
            'description': 'md5:861396cc523c9641d0dce690bc5c35f3',
            'timestamp': 946389600,
            'upload_date': '19991228',
            'display_id': 'home-warranties-workgroup-2',
            'categories': ['Legislative'],
        },
    }, {
        'url': 'https://tvw.org/video/washington-to-washington-a-new-space-race-2022041111/?eventID=2022041111',
        'md5': '6f5551090b351aba10c0d08a881b4f30',
        'info_dict': {
            'id': '2022041111',
            'ext': 'mp4',
            'title': 'Washington to Washington - A New Space Race',
            'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$',
            'description': 'md5:f65a24eec56107afbcebb3aa5cd26341',
            'timestamp': 1650394800,
            'upload_date': '20220419',
            'location': 'Hayner Media Center',
            'display_id': 'washington-to-washington-a-new-space-race-2022041111',
            'categories': ['Washington to Washington', 'General Interest'],
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The Invintus client/event IDs are exposed as page meta tags
        client_id = self._html_search_meta('clientID', webpage, fatal=True)
        video_id = self._html_search_meta('eventID', webpage, fatal=True)

        video_data = self._download_json(
            'https://api.v3.invintus.com/v2/Event/getDetailed', video_id,
            headers={
                'authorization': 'embedder',
                # Public embedder key used by the site's own player
                'wsc-api-key': '7WhiEBzijpritypp8bqcU7pfU9uicDR',
            },
            data=json.dumps({
                'clientID': client_id,
                'eventID': video_id,
                'showStreams': True,
            }).encode())['data']

        formats = []
        subtitles = {}
        for stream_url in traverse_obj(video_data, ('streamingURIs', ..., {url_or_none})):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        # Sidecar caption file (if any) is English WebVTT
        if caption_url := traverse_obj(video_data, ('captionPath', {url_or_none})):
            subtitles.setdefault('en', []).append({'url': caption_url, 'ext': 'vtt'})

        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            # OpenGraph values are fallbacks; API metadata below takes precedence
            'title': remove_end(self._og_search_title(webpage, default=None), ' - TVW'),
            'description': self._og_search_description(webpage, default=None),
            **traverse_obj(video_data, {
                'title': ('title', {str}),
                'description': ('description', {clean_html}),
                'categories': ('categories', ..., {str}),
                'thumbnail': ('videoThumbnail', {url_or_none}),
                'timestamp': ('startDateTime', {unified_timestamp}),
                'location': ('locationName', {str}),
                'is_live': ('eventStatus', {lambda x: x == 'live'}),
            }),
        }

View File

@@ -14,19 +14,20 @@ from ..utils import (
dict_get,
float_or_none,
int_or_none,
join_nonempty,
make_archive_id,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
str_or_none,
traverse_obj,
try_get,
unified_timestamp,
update_url_query,
url_or_none,
urljoin,
)
from ..utils.traversal import traverse_obj, value
class TwitchBaseIE(InfoExtractor):
@@ -42,10 +43,10 @@ class TwitchBaseIE(InfoExtractor):
'CollectionSideBar': '27111f1b382effad0b6def325caef1909c733fe6a4fbabf54f8d491ef2cf2f14',
'FilterableVideoTower_Videos': 'a937f1d22e269e39a03b509f65a7490f9fc247d7f83d6ac1421523e3b68042cb',
'ClipsCards__User': 'b73ad2bfaecfd30a9e6c28fada15bd97032c83ec77a0440766a56fe0bd632777',
'ShareClipRenderStatus': 'f130048a462a0ac86bb54d653c968c514e9ab9ca94db52368c1179e97b0f16eb',
'ChannelCollectionsContent': '447aec6a0cc1e8d0a8d7732d47eb0762c336a2294fdb009e9c9d854e49d484b9',
'StreamMetadata': 'a647c2a13599e5991e175155f798ca7f1ecddde73f7f341f39009c14dbf59962',
'ComscoreStreamingQuery': 'e1edae8122517d013405f237ffcc124515dc6ded82480a88daef69c83b53ac01',
'VideoAccessToken_Clip': '36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11',
'VideoPreviewOverlay': '3006e77e51b128d838fa4e835723ca4dc9a05c5efd4466c1085215c6e437e65c',
'VideoMetadata': '49b5b8f268cdeb259d75b58dcb0c1a748e3b575003448a2333dc5cdafd49adad',
'VideoPlayer_ChapterSelectButtonVideo': '8d2793384aac3773beab5e59bd5d6f585aedb923d292800119e03d40cd0f9b41',
@@ -1083,16 +1084,44 @@ class TwitchClipsIE(TwitchBaseIE):
'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
'md5': '761769e1eafce0ffebfb4089cb3847cd',
'info_dict': {
'id': '42850523',
'id': '396245304',
'display_id': 'FaintLightGullWholeWheat',
'ext': 'mp4',
'title': 'EA Play 2016 Live from the Novo Theatre',
'duration': 32,
'view_count': int,
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1465767393,
'upload_date': '20160612',
'creator': 'EA',
'uploader': 'stereotype_',
'uploader_id': '43566419',
'creators': ['EA'],
'channel': 'EA',
'channel_id': '25163635',
'channel_is_verified': False,
'channel_follower_count': int,
'uploader': 'EA',
'uploader_id': '25163635',
},
}, {
'url': 'https://www.twitch.tv/xqc/clip/CulturedAmazingKuduDatSheffy-TiZ_-ixAGYR3y2Uy',
'md5': 'e90fe616b36e722a8cfa562547c543f0',
'info_dict': {
'id': '3207364882',
'display_id': 'CulturedAmazingKuduDatSheffy-TiZ_-ixAGYR3y2Uy',
'ext': 'mp4',
'title': 'A day in the life of xQc',
'duration': 60,
'view_count': int,
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1742869615,
'upload_date': '20250325',
'creators': ['xQc'],
'channel': 'xQc',
'channel_id': '71092938',
'channel_is_verified': True,
'channel_follower_count': int,
'uploader': 'xQc',
'uploader_id': '71092938',
'categories': ['Just Chatting'],
},
}, {
# multiple formats
@@ -1116,16 +1145,14 @@ class TwitchClipsIE(TwitchBaseIE):
}]
def _real_extract(self, url):
video_id = self._match_id(url)
slug = self._match_id(url)
clip = self._download_gql(
video_id, [{
'operationName': 'VideoAccessToken_Clip',
'variables': {
'slug': video_id,
},
slug, [{
'operationName': 'ShareClipRenderStatus',
'variables': {'slug': slug},
}],
'Downloading clip access token GraphQL')[0]['data']['clip']
'Downloading clip GraphQL')[0]['data']['clip']
if not clip:
raise ExtractorError(
@@ -1135,81 +1162,71 @@ class TwitchClipsIE(TwitchBaseIE):
'sig': clip['playbackAccessToken']['signature'],
'token': clip['playbackAccessToken']['value'],
}
data = self._download_base_gql(
video_id, {
'query': '''{
clip(slug: "%s") {
broadcaster {
displayName
}
createdAt
curator {
displayName
id
}
durationSeconds
id
tiny: thumbnailURL(width: 86, height: 45)
small: thumbnailURL(width: 260, height: 147)
medium: thumbnailURL(width: 480, height: 272)
title
videoQualities {
frameRate
quality
sourceURL
}
viewCount
}
}''' % video_id}, 'Downloading clip GraphQL', fatal=False) # noqa: UP031
if data:
clip = try_get(data, lambda x: x['data']['clip'], dict) or clip
asset_default = traverse_obj(clip, ('assets', 0, {dict})) or {}
asset_portrait = traverse_obj(clip, ('assets', 1, {dict})) or {}
formats = []
for option in clip.get('videoQualities', []):
if not isinstance(option, dict):
continue
source = url_or_none(option.get('sourceURL'))
if not source:
continue
default_aspect_ratio = float_or_none(asset_default.get('aspectRatio'))
formats.extend(traverse_obj(asset_default, ('videoQualities', lambda _, v: url_or_none(v['sourceURL']), {
'url': ('sourceURL', {update_url_query(query=access_query)}),
'format_id': ('quality', {str}),
'height': ('quality', {int_or_none}),
'fps': ('frameRate', {float_or_none}),
'aspect_ratio': {value(default_aspect_ratio)},
})))
portrait_aspect_ratio = float_or_none(asset_portrait.get('aspectRatio'))
for source in traverse_obj(asset_portrait, ('videoQualities', lambda _, v: url_or_none(v['sourceURL']))):
formats.append({
'url': update_url_query(source, access_query),
'format_id': option.get('quality'),
'height': int_or_none(option.get('quality')),
'fps': int_or_none(option.get('frameRate')),
'url': update_url_query(source['sourceURL'], access_query),
'format_id': join_nonempty('portrait', source.get('quality')),
'height': int_or_none(source.get('quality')),
'fps': float_or_none(source.get('frameRate')),
'aspect_ratio': portrait_aspect_ratio,
'quality': -2,
})
thumbnails = []
for thumbnail_id in ('tiny', 'small', 'medium'):
thumbnail_url = clip.get(thumbnail_id)
if not thumbnail_url:
continue
thumb = {
'id': thumbnail_id,
'url': thumbnail_url,
}
mobj = re.search(r'-(\d+)x(\d+)\.', thumbnail_url)
if mobj:
thumb.update({
'height': int(mobj.group(2)),
'width': int(mobj.group(1)),
})
thumbnails.append(thumb)
thumb_asset_default_url = url_or_none(asset_default.get('thumbnailURL'))
if thumb_asset_default_url:
thumbnails.append({
'id': 'default',
'url': thumb_asset_default_url,
'preference': 0,
})
if thumb_asset_portrait_url := url_or_none(asset_portrait.get('thumbnailURL')):
thumbnails.append({
'id': 'portrait',
'url': thumb_asset_portrait_url,
'preference': -1,
})
thumb_default_url = url_or_none(clip.get('thumbnailURL'))
if thumb_default_url and thumb_default_url != thumb_asset_default_url:
thumbnails.append({
'id': 'small',
'url': thumb_default_url,
'preference': -2,
})
old_id = self._search_regex(r'%7C(\d+)(?:-\d+)?.mp4', formats[-1]['url'], 'old id', default=None)
return {
'id': clip.get('id') or video_id,
'id': clip.get('id') or slug,
'_old_archive_ids': [make_archive_id(self, old_id)] if old_id else None,
'display_id': video_id,
'title': clip.get('title'),
'display_id': slug,
'formats': formats,
'duration': int_or_none(clip.get('durationSeconds')),
'view_count': int_or_none(clip.get('viewCount')),
'timestamp': unified_timestamp(clip.get('createdAt')),
'thumbnails': thumbnails,
'creator': try_get(clip, lambda x: x['broadcaster']['displayName'], str),
'uploader': try_get(clip, lambda x: x['curator']['displayName'], str),
'uploader_id': try_get(clip, lambda x: x['curator']['id'], str),
**traverse_obj(clip, {
'title': ('title', {str}),
'duration': ('durationSeconds', {int_or_none}),
'view_count': ('viewCount', {int_or_none}),
'timestamp': ('createdAt', {parse_iso8601}),
'creators': ('broadcaster', 'displayName', {str}, filter, all),
'channel': ('broadcaster', 'displayName', {str}),
'channel_id': ('broadcaster', 'id', {str}),
'channel_follower_count': ('broadcaster', 'followers', 'totalCount', {int_or_none}),
'channel_is_verified': ('broadcaster', 'isPartner', {bool}),
'uploader': ('broadcaster', 'displayName', {str}),
'uploader_id': ('broadcaster', 'id', {str}),
'categories': ('game', 'displayName', {str}, filter, all, filter),
}),
}

View File

@@ -21,6 +21,7 @@ from ..utils import (
str_or_none,
strip_or_none,
traverse_obj,
truncate_string,
try_call,
try_get,
unified_timestamp,
@@ -358,6 +359,7 @@ class TwitterCardIE(InfoExtractor):
'display_id': '560070183650213889',
'uploader_url': 'https://twitter.com/Twitter',
},
'skip': 'This content is no longer available.',
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
@@ -365,7 +367,7 @@ class TwitterCardIE(InfoExtractor):
'info_dict': {
'id': '623160978427936768',
'ext': 'mp4',
'title': "NASA - Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video.",
'title': "NASA - Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASA...",
'description': "Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video. https://t.co/BJYgOjSeGA",
'uploader': 'NASA',
'uploader_id': 'NASA',
@@ -377,12 +379,14 @@ class TwitterCardIE(InfoExtractor):
'like_count': int,
'repost_count': int,
'tags': ['PlutoFlyby'],
'channel_id': '11348282',
'_old_archive_ids': ['twitter 623160978427936768'],
},
'params': {'format': '[protocol=https]'},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
'md5': 'b6d9683dd3f48e340ded81c0e917ad46',
'md5': 'fb08fbd69595cbd8818f0b2f2a94474d',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
@@ -390,12 +394,12 @@ class TwitterCardIE(InfoExtractor):
'description': 'md5:a831e97fa384863d6e26ce48d1c43376',
'upload_date': '20111013',
'uploader': 'OMG! UBUNTU!',
'uploader_id': 'omgubuntu',
'uploader_id': '@omgubuntu',
'channel_url': 'https://www.youtube.com/channel/UCIiSwcm9xiFb3Y4wjzR41eQ',
'channel_id': 'UCIiSwcm9xiFb3Y4wjzR41eQ',
'channel_follower_count': int,
'chapters': 'count:8',
'uploader_url': 'http://www.youtube.com/user/omgubuntu',
'uploader_url': 'https://www.youtube.com/@omgubuntu',
'duration': 138,
'categories': ['Film & Animation'],
'age_limit': 0,
@@ -407,6 +411,9 @@ class TwitterCardIE(InfoExtractor):
'tags': 'count:12',
'channel': 'OMG! UBUNTU!',
'playable_in_embed': True,
'heatmap': 'count:100',
'timestamp': 1318500227,
'live_status': 'not_live',
},
'add_ie': ['Youtube'],
},
@@ -548,13 +555,14 @@ class TwitterIE(TwitterBaseIE):
'age_limit': 0,
'_old_archive_ids': ['twitter 700207533655363584'],
},
'skip': 'Tweet has been deleted',
}, {
'url': 'https://twitter.com/captainamerica/status/719944021058060289',
'info_dict': {
'id': '717462543795523584',
'display_id': '719944021058060289',
'ext': 'mp4',
'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theaters.',
'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theat...',
'description': '@King0fNerd Are you sure you made the right choice? Find out in theaters. https://t.co/GpgYi9xMJI',
'channel_id': '701615052',
'uploader_id': 'CaptainAmerica',
@@ -591,7 +599,7 @@ class TwitterIE(TwitterBaseIE):
'info_dict': {
'id': '852077943283097602',
'ext': 'mp4',
'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة',
'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعا...',
'description': 'كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة https://t.co/xg6OhpyKfN',
'channel_id': '2526757026',
'uploader': 'عالم الأخبار',
@@ -615,7 +623,7 @@ class TwitterIE(TwitterBaseIE):
'id': '910030238373089285',
'display_id': '910031516746514432',
'ext': 'mp4',
'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre.',
'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terr...',
'thumbnail': r're:^https?://.*\.jpg',
'description': '[Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre. https://t.co/mwx01Rs4lo',
'channel_id': '2319432498',
@@ -707,7 +715,7 @@ class TwitterIE(TwitterBaseIE):
'id': '1349774757969989634',
'display_id': '1349794411333394432',
'ext': 'mp4',
'title': 'md5:d1c4941658e4caaa6cb579260d85dcba',
'title': "Brooklyn Nets - WATCH: Sean Marks' full media session after our acquisition of 8-time...",
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:71ead15ec44cee55071547d6447c6a3e',
'channel_id': '18552281',
@@ -733,7 +741,7 @@ class TwitterIE(TwitterBaseIE):
'id': '1577855447914409984',
'display_id': '1577855540407197696',
'ext': 'mp4',
'title': 'md5:466a3a8b049b5f5a13164ce915484b51',
'title': 'Oshtru - gm ✨️ now I can post image and video. nice update.',
'description': 'md5:b9c3699335447391d11753ab21c70a74',
'upload_date': '20221006',
'channel_id': '143077138',
@@ -755,10 +763,10 @@ class TwitterIE(TwitterBaseIE):
'url': 'https://twitter.com/UltimaShadowX/status/1577719286659006464',
'info_dict': {
'id': '1577719286659006464',
'title': 'Ultima Reload - Test',
'title': 'Ultima - Test',
'description': 'Test https://t.co/Y3KEZD7Dad',
'channel_id': '168922496',
'uploader': 'Ultima Reload',
'uploader': 'Ultima',
'uploader_id': 'UltimaShadowX',
'uploader_url': 'https://twitter.com/UltimaShadowX',
'upload_date': '20221005',
@@ -777,7 +785,7 @@ class TwitterIE(TwitterBaseIE):
'id': '1575559336759263233',
'display_id': '1575560063510810624',
'ext': 'mp4',
'title': 'md5:eec26382babd0f7c18f041db8ae1c9c9',
'title': 'Max Olson - Absolutely heartbreaking footage captured by our surge probe of catas...',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:95aea692fda36a12081b9629b02daa92',
'channel_id': '1094109584',
@@ -901,18 +909,18 @@ class TwitterIE(TwitterBaseIE):
'playlist_mincount': 2,
'info_dict': {
'id': '1600649710662213632',
'title': 'md5:be05989b0722e114103ed3851a0ffae2',
'title': "Jocelyn Laidlaw - How Kirstie Alley's tragic death inspired me to share more about my c...",
'timestamp': 1670459604.0,
'description': 'md5:591c19ce66fadc2359725d5cd0d1052c',
'comment_count': int,
'uploader_id': 'CTVJLaidlaw',
'uploader_id': 'JocelynVLaidlaw',
'channel_id': '80082014',
'repost_count': int,
'tags': ['colorectalcancer', 'cancerjourney', 'imnotaquitter'],
'upload_date': '20221208',
'age_limit': 0,
'uploader': 'Jocelyn Laidlaw',
'uploader_url': 'https://twitter.com/CTVJLaidlaw',
'uploader_url': 'https://twitter.com/JocelynVLaidlaw',
'like_count': int,
},
}, {
@@ -921,17 +929,17 @@ class TwitterIE(TwitterBaseIE):
'info_dict': {
'id': '1600649511827013632',
'ext': 'mp4',
'title': 'md5:7662a0a27ce6faa3e5b160340f3cfab1',
'title': "Jocelyn Laidlaw - How Kirstie Alley's tragic death inspired me to share more about my c... #1",
'thumbnail': r're:^https?://.+\.jpg',
'timestamp': 1670459604.0,
'channel_id': '80082014',
'uploader_id': 'CTVJLaidlaw',
'uploader_id': 'JocelynVLaidlaw',
'uploader': 'Jocelyn Laidlaw',
'repost_count': int,
'comment_count': int,
'tags': ['colorectalcancer', 'cancerjourney', 'imnotaquitter'],
'duration': 102.226,
'uploader_url': 'https://twitter.com/CTVJLaidlaw',
'uploader_url': 'https://twitter.com/JocelynVLaidlaw',
'display_id': '1600649710662213632',
'like_count': int,
'description': 'md5:591c19ce66fadc2359725d5cd0d1052c',
@@ -990,6 +998,7 @@ class TwitterIE(TwitterBaseIE):
'_old_archive_ids': ['twitter 1599108751385972737'],
},
'params': {'noplaylist': True},
'skip': 'Tweet is limited',
}, {
'url': 'https://twitter.com/MunTheShinobi/status/1600009574919962625',
'info_dict': {
@@ -1001,10 +1010,10 @@ class TwitterIE(TwitterBaseIE):
'description': 'This is a genius ad by Apple. \U0001f525\U0001f525\U0001f525\U0001f525\U0001f525 https://t.co/cNsA0MoOml',
'thumbnail': 'https://pbs.twimg.com/ext_tw_video_thumb/1600009362759733248/pu/img/XVhFQivj75H_YxxV.jpg?name=orig',
'age_limit': 0,
'uploader': 'Mün',
'uploader': 'Boy Called Mün',
'repost_count': int,
'upload_date': '20221206',
'title': 'Mün - This is a genius ad by Apple. \U0001f525\U0001f525\U0001f525\U0001f525\U0001f525',
'title': 'Boy Called Mün - This is a genius ad by Apple. \U0001f525\U0001f525\U0001f525\U0001f525\U0001f525',
'comment_count': int,
'like_count': int,
'tags': [],
@@ -1042,7 +1051,7 @@ class TwitterIE(TwitterBaseIE):
'id': '1694928337846538240',
'ext': 'mp4',
'display_id': '1695424220702888009',
'title': 'md5:e8daa9527bc2b947121395494f786d9d',
'title': 'Benny Johnson - Donald Trump driving through the urban, poor neighborhoods of Atlanta...',
'description': 'md5:004f2d37fd58737724ec75bc7e679938',
'channel_id': '15212187',
'uploader': 'Benny Johnson',
@@ -1066,7 +1075,7 @@ class TwitterIE(TwitterBaseIE):
'id': '1694928337846538240',
'ext': 'mp4',
'display_id': '1695424220702888009',
'title': 'md5:e8daa9527bc2b947121395494f786d9d',
'title': 'Benny Johnson - Donald Trump driving through the urban, poor neighborhoods of Atlanta...',
'description': 'md5:004f2d37fd58737724ec75bc7e679938',
'channel_id': '15212187',
'uploader': 'Benny Johnson',
@@ -1101,6 +1110,7 @@ class TwitterIE(TwitterBaseIE):
'view_count': int,
},
'add_ie': ['TwitterBroadcast'],
'skip': 'Broadcast no longer exists',
}, {
# Animated gif and quote tweet video
'url': 'https://twitter.com/BAKKOOONN/status/1696256659889565950',
@@ -1129,7 +1139,7 @@ class TwitterIE(TwitterBaseIE):
'info_dict': {
'id': '1724883339285544960',
'ext': 'mp4',
'title': 'md5:cc56716f9ed0b368de2ba54c478e493c',
'title': 'Robert F. Kennedy Jr - A beautifully crafted short film by Mikki Willis about my independent...',
'description': 'md5:9dc14f5b0f1311fc7caf591ae253a164',
'display_id': '1724884212803834154',
'channel_id': '337808606',
@@ -1150,7 +1160,7 @@ class TwitterIE(TwitterBaseIE):
}, {
# x.com
'url': 'https://x.com/historyinmemes/status/1790637656616943991',
'md5': 'daca3952ba0defe2cfafb1276d4c1ea5',
'md5': '4549eda363fecfe37439c455923cba2c',
'info_dict': {
'id': '1790637589910654976',
'ext': 'mp4',
@@ -1334,7 +1344,7 @@ class TwitterIE(TwitterBaseIE):
def _generate_syndication_token(self, twid):
# ((Number(twid) / 1e15) * Math.PI).toString(36).replace(/(0+|\.)/g, '')
translation = str.maketrans(dict.fromkeys('0.'))
return js_number_to_string((int(twid) / 1e15) * math.PI, 36).translate(translation)
return js_number_to_string((int(twid) / 1e15) * math.pi, 36).translate(translation)
def _call_syndication_api(self, twid):
self.report_warning(
@@ -1390,7 +1400,7 @@ class TwitterIE(TwitterBaseIE):
title = description = traverse_obj(
status, (('full_text', 'text'), {lambda x: x.replace('\n', ' ')}), get_all=False) or ''
# strip 'https -_t.co_BJYgOjSeGA' junk from filenames
title = re.sub(r'\s+(https?://[^ ]+)', '', title)
title = truncate_string(re.sub(r'\s+(https?://[^ ]+)', '', title), left=72)
user = status.get('user') or {}
uploader = user.get('name')
if uploader:

View File

@@ -51,6 +51,8 @@ class KnownDRMIE(UnsupportedInfoExtractor):
r'(?:watch|front)\.njpwworld\.com',
r'qub\.ca/vrai',
r'(?:beta\.)?crunchyroll\.com',
r'viki\.com',
r'deezer\.com',
)
_TESTS = [{
@@ -160,6 +162,12 @@ class KnownDRMIE(UnsupportedInfoExtractor):
}, {
'url': 'https://beta.crunchyroll.com/pt-br/watch/G8WUN8VKP/the-ruler-of-conspiracy',
'only_matching': True,
}, {
'url': 'https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1',
'only_matching': True,
}, {
'url': 'http://www.deezer.com/playlist/176747451',
'only_matching': True,
}]
def _real_extract(self, url):

View File

@@ -1,346 +0,0 @@
import hashlib
import hmac
import json
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
try_get,
)
class VikiBaseIE(InfoExtractor):
    """Shared plumbing for Viki extractors: API request signing, login,
    and common error handling (emulates the Viki Android app's API client)."""
    _VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
    _API_URL_TEMPLATE = 'https://api.viki.io%s'

    # Identifiers/secret of the mobile app whose API traffic is emulated
    _DEVICE_ID = '112395910d'
    _APP = '100005a'
    _APP_VERSION = '6.11.3'
    _APP_SECRET = 'd96704b180208dbb2efa30fe44c48bd8690441af9f567ba8fd710a72badc85198f7472'

    _GEO_BYPASS = False
    _NETRC_MACHINE = 'viki'

    # Session token set by _perform_login; None when not logged in
    _token = None

    _ERRORS = {
        'geo': 'Sorry, this content is not available in your region.',
        'upcoming': 'Sorry, this content is not yet available.',
        'paywall': 'Sorry, this content is only available to Viki Pass Plus subscribers',
    }

    def _stream_headers(self, timestamp, sig):
        """Headers mimicking the mobile app, used for signed stream requests."""
        return {
            'X-Viki-manufacturer': 'vivo',
            'X-Viki-device-model': 'vivo 1606',
            'X-Viki-device-os-ver': '6.0.1',
            'X-Viki-connection-type': 'WIFI',
            'X-Viki-carrier': '',
            'X-Viki-as-id': '100005a-1625321982-3932',
            'timestamp': str(timestamp),
            'signature': str(sig),
            'x-viki-app-ver': self._APP_VERSION,
        }

    def _api_query(self, path, version=4, **kwargs):
        """Build the API query path, appending app id, session token (when
        logged in) and any extra query parameters from kwargs."""
        # Append the right separator so 'app=...' can always be tacked on
        path += '?' if '?' not in path else '&'
        query = f'/v{version}/{path}app={self._APP}'
        if self._token:
            query += f'&token={self._token}'
        return query + ''.join(f'&{name}={val}' for name, val in kwargs.items())

    def _sign_query(self, path):
        """Return (timestamp, HMAC-SHA1 signature, full URL) for a signed call."""
        timestamp = int(time.time())
        query = self._api_query(path, version=5)
        # The signature covers the query string plus '&t=<timestamp>'
        sig = hmac.new(
            self._APP_SECRET.encode('ascii'), f'{query}&t={timestamp}'.encode('ascii'), hashlib.sha1).hexdigest()
        return timestamp, sig, self._API_URL_TEMPLATE % query

    def _call_api(
            self, path, video_id, note='Downloading JSON metadata', data=None, query=None, fatal=True):
        """Perform an API request.

        When no explicit query is given, a signed v5 request with app-style
        headers is made; otherwise a plain v4 request. JSON-reported errors
        are surfaced through _raise_error.
        """
        if query is None:
            timestamp, sig, url = self._sign_query(path)
        else:
            url = self._API_URL_TEMPLATE % self._api_query(path, version=4)
        resp = self._download_json(
            url, video_id, note, fatal=fatal, query=query,
            data=json.dumps(data).encode() if data else None,
            headers=({'x-viki-app-ver': self._APP_VERSION} if data
                     else self._stream_headers(timestamp, sig) if query is None
                     else None), expected_status=400) or {}
        # API errors come back as HTTP 400 with an 'error' field in the body
        self._raise_error(resp.get('error'), fatal)
        return resp

    def _raise_error(self, error, fatal=True):
        """Raise (or merely warn about, when not fatal) an API error message."""
        if error is None:
            return
        msg = f'{self.IE_NAME} said: {error}'
        if fatal:
            raise ExtractorError(msg, expected=True)
        else:
            self.report_warning(msg)

    def _check_errors(self, data):
        """Translate 'blocking' reasons in metadata into user-facing errors."""
        for reason, status in (data.get('blocking') or {}).items():
            if status and reason in self._ERRORS:
                message = self._ERRORS[reason]
                if reason == 'geo':
                    self.raise_geo_restricted(msg=message)
                elif reason == 'paywall':
                    if try_get(data, lambda x: x['paywallable']['tvod']):
                        self._raise_error('This video is for rent only or TVOD (Transactional Video On demand)')
                    self.raise_login_required(message)
                self._raise_error(message)

    def _perform_login(self, username, password):
        # Login failure is non-fatal so that free content still extracts
        self._token = self._call_api(
            'sessions.json', None, 'Logging in', fatal=False,
            data={'username': username, 'password': password}).get('token')
        if not self._token:
            self.report_warning('Login Failed: Unable to get session token')

    @staticmethod
    def dict_selection(dict_obj, preferred_key):
        """Return dict_obj[preferred_key] if present, else any truthy value
        from the dict (or None when there is none)."""
        if preferred_key in dict_obj:
            return dict_obj[preferred_key]
        return (list(filter(None, dict_obj.values())) or [None])[0]
class VikiIE(VikiBaseIE):
    """Extractor for individual Viki videos (episodes, clips, movies)."""
    IE_NAME = 'viki'
    _VALID_URL = rf'{VikiBaseIE._VALID_URL_BASE}(?:videos|player)/(?P<id>[0-9]+v)'
    _TESTS = [{
        'note': 'Free non-DRM video with storyboards in MPD',
        'url': 'https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1',
        'info_dict': {
            'id': '1175236v',
            'ext': 'mp4',
            'title': 'Choosing Spouse by Lottery - Episode 1',
            'timestamp': 1606463239,
            'age_limit': 13,
            'uploader': 'FCC',
            'upload_date': '20201127',
        },
    }, {
        'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
        'info_dict': {
            'id': '1023585v',
            'ext': 'mp4',
            'title': 'Heirs - Episode 14',
            'uploader': 'SBS Contents Hub',
            'timestamp': 1385047627,
            'upload_date': '20131121',
            'age_limit': 13,
            'duration': 3570,
            'episode_number': 14,
        },
        'skip': 'Blocked in the US',
    }, {
        # clip
        'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
        'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
        'info_dict': {
            'id': '1067139v',
            'ext': 'mp4',
            'title': "'The Avengers: Age of Ultron' Press Conference",
            'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
            'duration': 352,
            'timestamp': 1430380829,
            'upload_date': '20150430',
            'uploader': 'Arirang TV',
            'like_count': int,
            'age_limit': 0,
        },
        'skip': 'Sorry. There was an error loading this video',
    }, {
        'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
        'info_dict': {
            'id': '1048879v',
            'ext': 'mp4',
            'title': 'Ankhon Dekhi',
            'duration': 6512,
            'timestamp': 1408532356,
            'upload_date': '20140820',
            'uploader': 'Spuul',
            'like_count': int,
            'age_limit': 13,
        },
        'skip': 'Blocked in the US',
    }, {
        # episode
        'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
        'md5': '0a53dc252e6e690feccd756861495a8c',
        'info_dict': {
            'id': '44699v',
            'ext': 'mp4',
            'title': 'Boys Over Flowers - Episode 1',
            'description': 'md5:b89cf50038b480b88b5b3c93589a9076',
            'duration': 4172,
            'timestamp': 1270496524,
            'upload_date': '20100405',
            'uploader': 'group8',
            'like_count': int,
            'age_limit': 13,
            'episode_number': 1,
        },
    }, {
        # youtube external
        'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
        'md5': '63f8600c1da6f01b7640eee7eca4f1da',
        'info_dict': {
            'id': '50562v',
            'ext': 'webm',
            'title': 'Poor Nastya [COMPLETE] - Episode 1',
            'description': '',
            'duration': 606,
            'timestamp': 1274949505,
            'upload_date': '20101213',
            'uploader': 'ad14065n',
            'uploader_id': 'ad14065n',
            'like_count': int,
            'age_limit': 13,
        },
        'skip': 'Page not found!',
    }, {
        'url': 'http://www.viki.com/player/44699v',
        'only_matching': True,
    }, {
        # non-English description
        'url': 'http://www.viki.com/videos/158036v-love-in-magic',
        'md5': '41faaba0de90483fb4848952af7c7d0d',
        'info_dict': {
            'id': '158036v',
            'ext': 'mp4',
            'uploader': 'I Planet Entertainment',
            'upload_date': '20111122',
            'timestamp': 1321985454,
            'description': 'md5:44b1e46619df3a072294645c770cef36',
            'title': 'Love In Magic',
            'age_limit': 13,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._call_api(f'videos/{video_id}.json', video_id, 'Downloading video JSON', query={})
        self._check_errors(video)

        title = try_get(video, lambda x: x['titles']['en'], str)
        episode_number = int_or_none(video.get('number'))
        if not title:
            # Fall back to "<container> - Episode N" (or the raw id)
            title = f'Episode {episode_number}' if video.get('type') == 'episode' else video.get('id') or video_id
            container_titles = try_get(video, lambda x: x['container']['titles'], dict) or {}
            container_title = self.dict_selection(container_titles, 'en')
            title = f'{container_title} - {title}'

        thumbnails = [{
            'id': thumbnail_id,
            'url': thumbnail['url'],
        } for thumbnail_id, thumbnail in (video.get('images') or {}).items() if thumbnail.get('url')]

        resp = self._call_api(
            f'playback_streams/{video_id}.json?drms=dt3&device_id={self._DEVICE_ID}',
            video_id, 'Downloading video streams JSON')['main'][0]

        stream_id = try_get(resp, lambda x: x['properties']['track']['stream_id'])
        # Use a dict comprehension (instead of dict() over a genexp) to map
        # each available subtitle language to its srt/vtt download URLs
        subtitles = {lang: [{
            'ext': ext,
            'url': self._API_URL_TEMPLATE % self._api_query(
                f'videos/{video_id}/auth_subtitles/{lang}.{ext}', stream_id=stream_id),
        } for ext in ('srt', 'vtt')] for lang in (video.get('subtitle_completions') or {})}

        mpd_url = resp['url']
        # 720p is hidden in another MPD which can be found in the current manifest content
        mpd_content = self._download_webpage(mpd_url, video_id, note='Downloading initial MPD manifest')
        # NB: the dot before 'mpd' is escaped so that only real '.mpd' URLs match
        mpd_url = self._search_regex(
            r'(?mi)<BaseURL>(http.+\.mpd)', mpd_content, 'new manifest', default=mpd_url)
        if 'mpdhd_high' not in mpd_url and 'sig=' not in mpd_url:
            # Modify the URL to get 1080p
            mpd_url = mpd_url.replace('mpdhd', 'mpdhd_high')
        formats = self._extract_mpd_formats(mpd_url, video_id)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': self.dict_selection(video.get('descriptions', {}), 'en'),
            'duration': int_or_none(video.get('duration')),
            'timestamp': parse_iso8601(video.get('created_at')),
            'uploader': video.get('author'),
            'uploader_url': video.get('author_url'),
            'like_count': int_or_none(try_get(video, lambda x: x['likes']['count'])),
            'age_limit': parse_age_limit(video.get('rating')),
            'thumbnails': thumbnails,
            'subtitles': subtitles,
            'episode_number': episode_number,
        }
class VikiChannelIE(VikiBaseIE):
    """Playlist extractor for Viki containers (TV shows, movies, artists)."""
    IE_NAME = 'viki:channel'
    _VALID_URL = rf'{VikiBaseIE._VALID_URL_BASE}(?:tv|news|movies|artists)/(?P<id>[0-9]+c)'
    _TESTS = [{
        'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
        'info_dict': {
            'id': '50c',
            'title': 'Boys Over Flowers',
            'description': 'md5:804ce6e7837e1fd527ad2f25420f4d59',
        },
        'playlist_mincount': 51,
    }, {
        'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
        'info_dict': {
            'id': '1354c',
            'title': 'Poor Nastya [COMPLETE]',
            'description': 'md5:05bf5471385aa8b21c18ad450e350525',
        },
        'playlist_count': 127,
        'skip': 'Page not found',
    }, {
        'url': 'http://www.viki.com/news/24569c-showbiz-korea',
        'only_matching': True,
    }, {
        'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005',
        'only_matching': True,
    }, {
        'url': 'http://www.viki.com/artists/2141c-shinee',
        'only_matching': True,
    }]

    _video_types = ('episodes', 'movies', 'clips', 'trailers')

    def _entries(self, channel_id):
        """Yield url_results for every video in the container, paging
        through each requested video type in turn."""
        query = {
            'app': self._APP, 'token': self._token, 'only_ids': 'true',
            'direction': 'asc', 'sort': 'number', 'per_page': 30,
        }
        requested_types = self._configuration_arg('video_types') or self._video_types
        for vtype in requested_types:
            if vtype not in self._video_types:
                self.report_warning(f'Unknown video_type: {vtype}')
            page = 1
            while True:
                query['page'] = page
                listing = self._call_api(
                    f'containers/{channel_id}/{vtype}.json', channel_id, query=query, fatal=False,
                    note=f'Downloading {vtype.title()} JSON page {page}')
                for vid in listing.get('response') or []:
                    yield self.url_result(f'https://www.viki.com/videos/{vid}', VikiIE.ie_key(), vid)
                if not listing.get('more'):
                    break
                page += 1

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        channel = self._call_api(f'containers/{channel_id}.json', channel_id, 'Downloading channel JSON')
        self._check_errors(channel)
        title = self.dict_selection(channel['titles'], 'en')
        description = self.dict_selection(channel['descriptions'], 'en')
        return self.playlist_result(self._entries(channel_id), channel_id, title, description)

View File

@@ -116,6 +116,7 @@ class VKIE(VKBaseIE):
'id': '-77521_162222515',
'ext': 'mp4',
'title': 'ProtivoGunz - Хуёвая песня',
'description': 'Видео из официальной группы Noize MC\nhttp://vk.com/noizemc',
'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*',
'uploader_id': '39545378',
'duration': 195,
@@ -165,6 +166,7 @@ class VKIE(VKBaseIE):
'id': '-93049196_456239755',
'ext': 'mp4',
'title': '8 серия (озвучка)',
'description': 'Видео из официальной группы Noize MC\nhttp://vk.com/noizemc',
'duration': 8383,
'comment_count': int,
'uploader': 'Dizi2021',
@@ -240,6 +242,7 @@ class VKIE(VKBaseIE):
'upload_date': '20221005',
'uploader': 'Шальная Императрица',
'uploader_id': '-74006511',
'description': 'md5:f9315f7786fa0e84e75e4f824a48b056',
},
},
{
@@ -278,6 +281,25 @@ class VKIE(VKBaseIE):
},
'skip': 'No formats found',
},
{
'note': 'video has chapters',
'url': 'https://vkvideo.ru/video-18403220_456239696',
'info_dict': {
'id': '-18403220_456239696',
'ext': 'mp4',
'title': 'Трамп отменяет гранты // DeepSeek - Революция в ИИ // Илон Маск читер',
'description': 'md5:b112ea9de53683b6d03d29076f62eec2',
'uploader': 'Руслан Усачев',
'uploader_id': '-18403220',
'comment_count': int,
'like_count': int,
'duration': 1983,
'thumbnail': r're:https?://.+\.jpg',
'chapters': 'count:21',
'timestamp': 1738252883,
'upload_date': '20250130',
},
},
{
# live stream, hls and rtmp links, most likely already finished live
# stream by the time you are reading this comment
@@ -449,7 +471,6 @@ class VKIE(VKBaseIE):
return self.url_result(opts_url)
data = player['params'][0]
title = unescapeHTML(data['md_title'])
# 2 = live
# 3 = post live (finished live)
@@ -507,17 +528,29 @@ class VKIE(VKBaseIE):
return {
'id': video_id,
'formats': formats,
'title': title,
'thumbnail': data.get('jpg'),
'uploader': data.get('md_author'),
'uploader_id': str_or_none(data.get('author_id') or mv_data.get('authorId')),
'duration': int_or_none(data.get('duration') or mv_data.get('duration')),
'subtitles': subtitles,
**traverse_obj(mv_data, {
'title': ('title', {unescapeHTML}),
'description': ('desc', {clean_html}, filter),
'duration': ('duration', {int_or_none}),
'like_count': ('likes', {int_or_none}),
'comment_count': ('commcount', {int_or_none}),
}),
**traverse_obj(data, {
'title': ('md_title', {unescapeHTML}),
'description': ('description', {clean_html}, filter),
'thumbnail': ('jpg', {url_or_none}),
'uploader': ('md_author', {str}),
'uploader_id': (('author_id', 'authorId'), {str_or_none}, any),
'duration': ('duration', {int_or_none}),
'chapters': ('time_codes', lambda _, v: isinstance(v['time'], int), {
'title': ('text', {unescapeHTML}),
'start_time': 'time',
}),
}),
'timestamp': timestamp,
'view_count': view_count,
'like_count': int_or_none(mv_data.get('likes')),
'comment_count': int_or_none(mv_data.get('commcount')),
'is_live': is_live,
'subtitles': subtitles,
'_format_sort_fields': ('res', 'source'),
}

View File

@@ -0,0 +1,185 @@
import itertools
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
clean_html,
extract_attributes,
parse_duration,
parse_qs,
)
from ..utils.traversal import (
find_element,
find_elements,
traverse_obj,
)
class VrSquareIE(InfoExtractor):
    """Extractor for individual VR SQUARE (livr.jp) videos."""
    IE_NAME = 'vrsquare'
    IE_DESC = 'VR SQUARE'

    _BASE_URL = 'https://livr.jp'
    _VALID_URL = r'https?://livr\.jp/contents/(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://livr.jp/contents/P470896661',
        'info_dict': {
            'id': 'P470896661',
            'ext': 'mp4',
            'title': 'そこ曲がったら、櫻坂? 7年間お疲れ様!菅井友香の卒業を祝う会!前半 2022年11月6日放送分',
            'description': 'md5:523726dc835aa8014dfe1e2b38d36cd1',
            'duration': 1515.0,
            'tags': 'count:2',
            'thumbnail': r're:https?://media\.livr\.jp/vod/img/.+\.jpg',
        },
    }, {
        'url': 'https://livr.jp/contents/P589523973',
        'info_dict': {
            'id': 'P589523973',
            'ext': 'mp4',
            'title': '薄闇に仰ぐ しだれ桜の妖艶',
            'description': 'md5:a042f517b2cbb4ed6746707afec4d306',
            'duration': 1084.0,
            'tags': list,
            'thumbnail': r're:https?://media\.livr\.jp/vod/img/.+\.jpg',
        },
        'skip': 'Paid video',
    }, {
        'url': 'https://livr.jp/contents/P316939908',
        'info_dict': {
            'id': 'P316939908',
            'ext': 'mp4',
            'title': '2024年5月16日 「今日は誰に恋をする?」公演 小栗有以 生誕祭',
            'description': 'md5:2110bdcf947f28bd7d06ec420e51b619',
            'duration': 8559.0,
            'tags': list,
            'thumbnail': r're:https?://media\.livr\.jp/vod/img/.+\.jpg',
        },
        'skip': 'Premium channel subscribers only',
    }, {
        # Accessible only in the VR SQUARE app
        'url': 'https://livr.jp/contents/P126481458',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        status = self._download_json(
            f'{self._BASE_URL}/webApi/contentsStatus/{video_id}',
            video_id, 'Checking contents status', fatal=False)
        # result_code '40407' marks content that requires a logged-in account
        if traverse_obj(status, 'result_code') == '40407':
            self.raise_login_required('Unable to access this video')

        try:
            web_api = self._download_json(
                f'{self._BASE_URL}/webApi/play/url/{video_id}', video_id)
        except ExtractorError as e:
            # The play-url endpoint answers HTTP 500 for app-only content
            if isinstance(e.cause, HTTPError) and e.cause.status == 500:
                raise ExtractorError('VR SQUARE app-only videos are not supported', expected=True)
            raise

        return {
            'id': video_id,
            'title': self._html_search_meta(['og:title', 'twitter:title'], webpage),
            'description': self._html_search_meta('description', webpage),
            # Only the first available HLS URL from the API response is used
            'formats': self._extract_m3u8_formats(traverse_obj(web_api, (
                'urls', ..., 'url', any)), video_id, 'mp4', fatal=False),
            'thumbnail': self._html_search_meta('og:image', webpage),
            # Duration and tags are scraped from the page markup
            **traverse_obj(webpage, {
                'duration': ({find_element(cls='layout-product-data-time')}, {parse_duration}),
                'tags': ({find_elements(cls='search-tag')}, ..., {clean_html}),
            }),
        }
class VrSquarePlaylistBaseIE(InfoExtractor):
    """Shared helpers for VR SQUARE playlist-style extractors."""
    _BASE_URL = 'https://livr.jp'

    def _fetch_vids(self, source, keys=()):
        """Yield url_results for every video card found in *source*.

        *source* may be an HTML page or (with *keys*) a JSON structure
        containing HTML fragments; video links are read from the
        'data-url' attribute of elements with class 'video'.
        """
        for url_path in traverse_obj(source, (
            *keys, {find_elements(cls='video', html=True)}, ...,
            {extract_attributes}, 'data-url', {str}, filter),
        ):
            yield self.url_result(
                f'{self._BASE_URL}/contents/{url_path.removeprefix("/contents/")}', VrSquareIE)

    def _entries(self, path, display_id, query=None):
        """Page through an AJAX listing endpoint, yielding all videos.

        Pagination uses the 'p' query parameter and stops when the response
        reports no next page (either 'has_next' or 'hasNext').
        """
        for page in itertools.count(1):
            ajax = self._download_json(
                f'{self._BASE_URL}{path}', display_id,
                f'Downloading playlist JSON page {page}',
                query={'p': page, **(query or {})})
            yield from self._fetch_vids(ajax, ('contents_render_list', ...))
            if not traverse_obj(ajax, (('has_next', 'hasNext'), {bool}, any)):
                break
class VrSquareChannelIE(VrSquarePlaylistBaseIE):
    """Playlist extractor for VR SQUARE channel pages."""
    IE_NAME = 'vrsquare:channel'

    _VALID_URL = r'https?://livr\.jp/channel/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://livr.jp/channel/H372648599',
        'info_dict': {
            'id': 'H372648599',
            'title': 'AKB48チャンネル',
        },
        'playlist_mincount': 502,
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        page = self._download_webpage(url, channel_id)
        # Channel title comes from the page's OpenGraph metadata
        title = self._html_search_meta('og:title', page)
        entries = self._entries(f'/ajax/channel/{channel_id}', channel_id)
        return self.playlist_result(entries, channel_id, title)
class VrSquareSearchIE(VrSquarePlaylistBaseIE):
    """Playlist extractor for VR SQUARE web search result pages."""
    IE_NAME = 'vrsquare:search'

    _VALID_URL = r'https?://livr\.jp/web-search/?\?(?:[^#]+&)?w=[^#]+'
    _TESTS = [{
        'url': 'https://livr.jp/web-search?w=%23%E5%B0%8F%E6%A0%97%E6%9C%89%E4%BB%A5',
        'info_dict': {
            'id': '#小栗有以',
        },
        'playlist_mincount': 60,
    }]

    def _real_extract(self, url):
        # The decoded search term doubles as the playlist id
        term = parse_qs(url)['w'][0]
        entries = self._entries('/ajax/web-search', term, {'w': term})
        return self.playlist_result(entries, term)
class VrSquareSectionIE(VrSquarePlaylistBaseIE):
    """Playlist extractor for VR SQUARE category and headline pages."""
    IE_NAME = 'vrsquare:section'

    _VALID_URL = r'https?://livr\.jp/(?:category|headline)/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://livr.jp/category/C133936275',
        'info_dict': {
            'id': 'C133936275',
            'title': 'そこ曲がったら、櫻坂VR',
        },
        'playlist_mincount': 308,
    }, {
        'url': 'https://livr.jp/headline/A296449604',
        'info_dict': {
            'id': 'A296449604',
            'title': 'AKB48 アフターVR',
        },
        'playlist_mincount': 22,
    }]

    def _real_extract(self, url):
        section_id = self._match_id(url)
        page = self._download_webpage(url, section_id)
        # Section pages embed all videos directly in the HTML — no AJAX paging
        title = self._html_search_meta('og:title', page)
        return self.playlist_result(self._fetch_vids(page), section_id, title)

View File

@@ -2,31 +2,33 @@ import json
import time
import urllib.parse
from .gigya import GigyaBaseIE
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
clean_html,
extract_attributes,
filter_dict,
float_or_none,
get_element_by_class,
get_element_html_by_class,
int_or_none,
join_nonempty,
jwt_decode_hs256,
jwt_encode_hs256,
make_archive_id,
merge_dicts,
parse_age_limit,
parse_duration,
parse_iso8601,
str_or_none,
strip_or_none,
traverse_obj,
try_call,
url_or_none,
urlencode_postdata,
)
class VRTBaseIE(GigyaBaseIE):
class VRTBaseIE(InfoExtractor):
_GEO_BYPASS = False
_PLAYER_INFO = {
'platform': 'desktop',
@@ -37,11 +39,11 @@ class VRTBaseIE(GigyaBaseIE):
'device': 'undefined (undefined)',
'os': {
'name': 'Windows',
'version': 'x86_64',
'version': '10',
},
'player': {
'name': 'VRT web player',
'version': '2.7.4-prod-2023-04-19T06:05:45',
'version': '5.1.1-prod-2025-02-14T08:44:16"',
},
}
# From https://player.vrt.be/vrtnws/js/main.js & https://player.vrt.be/ketnet/js/main.8cdb11341bcb79e4cd44.js
@@ -90,20 +92,21 @@ class VRTBaseIE(GigyaBaseIE):
def _call_api(self, video_id, client='null', id_token=None, version='v2'):
player_info = {'exp': (round(time.time(), 3) + 900), **self._PLAYER_INFO}
player_token = self._download_json(
'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v2/tokens',
video_id, 'Downloading player token', headers={
f'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/{version}/tokens',
video_id, 'Downloading player token', 'Failed to download player token', headers={
**self.geo_verification_headers(),
'Content-Type': 'application/json',
}, data=json.dumps({
'identityToken': id_token or {},
'identityToken': id_token or '',
'playerInfo': jwt_encode_hs256(player_info, self._JWT_SIGNING_KEY, headers={
'kid': self._JWT_KEY_ID,
}).decode(),
}, separators=(',', ':')).encode())['vrtPlayerToken']
return self._download_json(
f'https://media-services-public.vrt.be/media-aggregator/{version}/media-items/{video_id}',
video_id, 'Downloading API JSON', query={
# The URL below redirects to https://media-services-public.vrt.be/media-aggregator/{version}/media-items/{video_id}
f'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/{version}/videos/{video_id}',
video_id, 'Downloading API JSON', 'Failed to download API JSON', query={
'vrtPlayerToken': player_token,
'client': client,
}, expected_status=400)
@@ -177,215 +180,286 @@ class VRTIE(VRTBaseIE):
class VrtNUIE(VRTBaseIE):
IE_DESC = 'VRT MAX'
_VALID_URL = r'https?://(?:www\.)?vrt\.be/vrtnu/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
IE_NAME = 'vrtmax'
IE_DESC = 'VRT MAX (formerly VRT NU)'
_VALID_URL = r'https?://(?:www\.)?vrt\.be/(?:vrtnu|vrtmax)/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)'
_TESTS = [{
# CONTENT_IS_AGE_RESTRICTED
'url': 'https://www.vrt.be/vrtnu/a-z/de-ideale-wereld/2023-vj/de-ideale-wereld-d20230116/',
'url': 'https://www.vrt.be/vrtmax/a-z/ket---doc/trailer/ket---doc-trailer-s6/',
'info_dict': {
'id': 'pbs-pub-855b00a8-6ce2-4032-ac4f-1fcf3ae78524$vid-d2243aa1-ec46-4e34-a55b-92568459906f',
'id': 'pbs-pub-c8a78645-5d3e-468a-89ec-6f3ed5534bd5$vid-242ddfe9-18f5-4e16-ab45-09b122a19251',
'ext': 'mp4',
'title': 'Tom Waes',
'description': 'Satirisch actualiteitenmagazine met Ella Leyers. Tom Waes is te gast.',
'timestamp': 1673905125,
'release_timestamp': 1673905125,
'series': 'De ideale wereld',
'season_id': '1672830988794',
'episode': 'Aflevering 1',
'episode_number': 1,
'episode_id': '1672830988861',
'display_id': 'de-ideale-wereld-d20230116',
'channel': 'VRT',
'duration': 1939.0,
'thumbnail': 'https://images.vrt.be/orig/2023/01/10/1bb39cb3-9115-11ed-b07d-02b7b76bf47f.jpg',
'release_date': '20230116',
'upload_date': '20230116',
'age_limit': 12,
'channel': 'ketnet',
'description': 'Neem een kijkje in de bijzondere wereld van deze Ketnetters.',
'display_id': 'ket---doc-trailer-s6',
'duration': 30.0,
'episode': 'Reeks 6 volledig vanaf 3 maart',
'episode_id': '1739450401467',
'season': 'Trailer',
'season_id': '1739450401467',
'series': 'Ket & Doc',
'thumbnail': 'https://images.vrt.be/orig/2025/02/21/63f07122-5bbd-4ca1-b42e-8565c6cd95df.jpg',
'timestamp': 1740373200,
'title': 'Reeks 6 volledig vanaf 3 maart',
'upload_date': '20250224',
'_old_archive_ids': [
'canvas pbs-pub-c8a78645-5d3e-468a-89ec-6f3ed5534bd5$vid-242ddfe9-18f5-4e16-ab45-09b122a19251',
'ketnet pbs-pub-c8a78645-5d3e-468a-89ec-6f3ed5534bd5$vid-242ddfe9-18f5-4e16-ab45-09b122a19251',
],
},
}, {
'url': 'https://www.vrt.be/vrtnu/a-z/buurman--wat-doet-u-nu-/6/buurman--wat-doet-u-nu--s6-trailer/',
'url': 'https://www.vrt.be/vrtmax/a-z/meisjes/6/meisjes-s6a5/',
'info_dict': {
'id': 'pbs-pub-ad4050eb-d9e5-48c2-9ec8-b6c355032361$vid-0465537a-34a8-4617-8352-4d8d983b4eee',
'id': 'pbs-pub-97b541ab-e05c-43b9-9a40-445702ef7189$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e',
'ext': 'mp4',
'title': 'Trailer seizoen 6 \'Buurman, wat doet u nu?\'',
'description': 'md5:197424726c61384b4e5c519f16c0cf02',
'timestamp': 1652940000,
'release_timestamp': 1652940000,
'series': 'Buurman, wat doet u nu?',
'season': 'Seizoen 6',
'channel': 'ketnet',
'description': 'md5:713793f15cbf677f66200b36b7b1ec5a',
'display_id': 'meisjes-s6a5',
'duration': 1336.02,
'episode': 'Week 5',
'episode_id': '1684157692901',
'episode_number': 5,
'season': '6',
'season_id': '1684157692901',
'season_number': 6,
'season_id': '1652344200907',
'episode': 'Aflevering 0',
'episode_number': 0,
'episode_id': '1652951873524',
'display_id': 'buurman--wat-doet-u-nu--s6-trailer',
'channel': 'VRT',
'duration': 33.13,
'thumbnail': 'https://images.vrt.be/orig/2022/05/23/3c234d21-da83-11ec-b07d-02b7b76bf47f.jpg',
'release_date': '20220519',
'upload_date': '20220519',
'series': 'Meisjes',
'thumbnail': 'https://images.vrt.be/orig/2023/05/14/bf526ae0-f1d9-11ed-91d7-02b7b76bf47f.jpg',
'timestamp': 1685251800,
'title': 'Week 5',
'upload_date': '20230528',
'_old_archive_ids': [
'canvas pbs-pub-97b541ab-e05c-43b9-9a40-445702ef7189$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e',
'ketnet pbs-pub-97b541ab-e05c-43b9-9a40-445702ef7189$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e',
],
},
}, {
'url': 'https://www.vrt.be/vrtnu/a-z/taboe/3/taboe-s3a4/',
'info_dict': {
'id': 'pbs-pub-f50faa3a-1778-46b6-9117-4ba85f197703$vid-547507fe-1c8b-4394-b361-21e627cbd0fd',
'ext': 'mp4',
'channel': 'een',
'description': 'md5:bf61345a95eca9393a95de4a7a54b5c6',
'display_id': 'taboe-s3a4',
'duration': 2882.02,
'episode': 'Mensen met het syndroom van Gilles de la Tourette',
'episode_id': '1739055911734',
'episode_number': 4,
'season': '3',
'season_id': '1739055911734',
'season_number': 3,
'series': 'Taboe',
'thumbnail': 'https://images.vrt.be/orig/2025/02/19/8198496c-d1ae-4bca-9a48-761cf3ea3ff2.jpg',
'timestamp': 1740286800,
'title': 'Mensen met het syndroom van Gilles de la Tourette',
'upload_date': '20250223',
'_old_archive_ids': [
'canvas pbs-pub-f50faa3a-1778-46b6-9117-4ba85f197703$vid-547507fe-1c8b-4394-b361-21e627cbd0fd',
'ketnet pbs-pub-f50faa3a-1778-46b6-9117-4ba85f197703$vid-547507fe-1c8b-4394-b361-21e627cbd0fd',
],
},
'params': {'skip_download': 'm3u8'},
}]
_NETRC_MACHINE = 'vrtnu'
_authenticated = False
_TOKEN_COOKIE_DOMAIN = '.www.vrt.be'
_ACCESS_TOKEN_COOKIE_NAME = 'vrtnu-site_profile_at'
_REFRESH_TOKEN_COOKIE_NAME = 'vrtnu-site_profile_rt'
_VIDEO_TOKEN_COOKIE_NAME = 'vrtnu-site_profile_vt'
_VIDEO_PAGE_QUERY = '''
query VideoPage($pageId: ID!) {
page(id: $pageId) {
... on EpisodePage {
episode {
ageRaw
description
durationRaw
episodeNumberRaw
id
name
onTimeRaw
program {
title
}
season {
id
titleRaw
}
title
brand
}
ldjson
player {
image {
templateUrl
}
modes {
streamId
}
}
}
}
}
'''
def _fetch_tokens(self):
has_credentials = self._get_login_info()[0]
access_token = self._get_vrt_cookie(self._ACCESS_TOKEN_COOKIE_NAME)
video_token = self._get_vrt_cookie(self._VIDEO_TOKEN_COOKIE_NAME)
if (access_token and not self._is_jwt_token_expired(access_token)
and video_token and not self._is_jwt_token_expired(video_token)):
return access_token, video_token
if has_credentials:
access_token, video_token = self.cache.load(self._NETRC_MACHINE, 'token_data', default=(None, None))
if (access_token and not self._is_jwt_token_expired(access_token)
and video_token and not self._is_jwt_token_expired(video_token)):
self.write_debug('Restored tokens from cache')
self._set_cookie(self._TOKEN_COOKIE_DOMAIN, self._ACCESS_TOKEN_COOKIE_NAME, access_token)
self._set_cookie(self._TOKEN_COOKIE_DOMAIN, self._VIDEO_TOKEN_COOKIE_NAME, video_token)
return access_token, video_token
if not self._get_vrt_cookie(self._REFRESH_TOKEN_COOKIE_NAME):
return None, None
self._request_webpage(
'https://www.vrt.be/vrtmax/sso/refresh', None,
note='Refreshing tokens', errnote='Failed to refresh tokens', fatal=False)
access_token = self._get_vrt_cookie(self._ACCESS_TOKEN_COOKIE_NAME)
video_token = self._get_vrt_cookie(self._VIDEO_TOKEN_COOKIE_NAME)
if not access_token or not video_token:
self.cache.store(self._NETRC_MACHINE, 'refresh_token', None)
self.cookiejar.clear(self._TOKEN_COOKIE_DOMAIN, '/vrtmax/sso', self._REFRESH_TOKEN_COOKIE_NAME)
msg = 'Refreshing of tokens failed'
if not has_credentials:
self.report_warning(msg)
return None, None
self.report_warning(f'{msg}. Re-logging in')
return self._perform_login(*self._get_login_info())
if has_credentials:
self.cache.store(self._NETRC_MACHINE, 'token_data', (access_token, video_token))
return access_token, video_token
def _get_vrt_cookie(self, cookie_name):
# Refresh token cookie is scoped to /vrtmax/sso, others are scoped to /
return try_call(lambda: self._get_cookies('https://www.vrt.be/vrtmax/sso')[cookie_name].value)
@staticmethod
def _is_jwt_token_expired(token):
return jwt_decode_hs256(token)['exp'] - time.time() < 300
def _perform_login(self, username, password):
auth_info = self._gigya_login({
'APIKey': '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy',
'targetEnv': 'jssdk',
'loginID': username,
'password': password,
'authMode': 'cookie',
})
refresh_token = self._get_vrt_cookie(self._REFRESH_TOKEN_COOKIE_NAME)
if refresh_token and not self._is_jwt_token_expired(refresh_token):
self.write_debug('Using refresh token from logged-in cookies; skipping login with credentials')
return
if auth_info.get('errorDetails'):
raise ExtractorError(f'Unable to login. VrtNU said: {auth_info["errorDetails"]}', expected=True)
refresh_token = self.cache.load(self._NETRC_MACHINE, 'refresh_token', default=None)
if refresh_token and not self._is_jwt_token_expired(refresh_token):
self.write_debug('Restored refresh token from cache')
self._set_cookie(self._TOKEN_COOKIE_DOMAIN, self._REFRESH_TOKEN_COOKIE_NAME, refresh_token, path='/vrtmax/sso')
return
# Sometimes authentication fails for no good reason, retry
for retry in self.RetryManager():
if retry.attempt > 1:
self._sleep(1, None)
try:
self._request_webpage(
'https://token.vrt.be/vrtnuinitlogin', None, note='Requesting XSRF Token',
errnote='Could not get XSRF Token', query={
'provider': 'site',
'destination': 'https://www.vrt.be/vrtnu/',
})
self._request_webpage(
'https://login.vrt.be/perform_login', None,
note='Performing login', errnote='Login failed',
query={'client_id': 'vrtnu-site'}, data=urlencode_postdata({
'UID': auth_info['UID'],
'UIDSignature': auth_info['UIDSignature'],
'signatureTimestamp': auth_info['signatureTimestamp'],
'_csrf': self._get_cookies('https://login.vrt.be').get('OIDCXSRF').value,
}))
except ExtractorError as e:
if isinstance(e.cause, HTTPError) and e.cause.status == 401:
retry.error = e
continue
raise
self._request_webpage(
'https://www.vrt.be/vrtmax/sso/login', None,
note='Getting session cookies', errnote='Failed to get session cookies')
self._authenticated = True
login_data = self._download_json(
'https://login.vrt.be/perform_login', None, data=json.dumps({
'clientId': 'vrtnu-site',
'loginID': username,
'password': password,
}).encode(), headers={
'Content-Type': 'application/json',
'Oidcxsrf': self._get_cookies('https://login.vrt.be')['OIDCXSRF'].value,
}, note='Logging in', errnote='Login failed', expected_status=403)
if login_data.get('errorCode'):
raise ExtractorError(f'Login failed: {login_data.get("errorMessage")}', expected=True)
self._request_webpage(
login_data['redirectUrl'], None,
note='Getting access token', errnote='Failed to get access token')
access_token = self._get_vrt_cookie(self._ACCESS_TOKEN_COOKIE_NAME)
video_token = self._get_vrt_cookie(self._VIDEO_TOKEN_COOKIE_NAME)
refresh_token = self._get_vrt_cookie(self._REFRESH_TOKEN_COOKIE_NAME)
if not all((access_token, video_token, refresh_token)):
raise ExtractorError('Unable to extract token cookie values')
self.cache.store(self._NETRC_MACHINE, 'token_data', (access_token, video_token))
self.cache.store(self._NETRC_MACHINE, 'refresh_token', refresh_token)
return access_token, video_token
def _real_extract(self, url):
display_id = self._match_id(url)
parsed_url = urllib.parse.urlparse(url)
details = self._download_json(
f'{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path.rstrip("/")}.model.json',
display_id, 'Downloading asset JSON', 'Unable to download asset JSON')['details']
access_token, video_token = self._fetch_tokens()
watch_info = traverse_obj(details, (
'actions', lambda _, v: v['type'] == 'watch-episode', {dict}), get_all=False) or {}
video_id = join_nonempty(
'episodePublicationId', 'episodeVideoId', delim='$', from_dict=watch_info)
if '$' not in video_id:
raise ExtractorError('Unable to extract video ID')
metadata = self._download_json(
f'https://www.vrt.be/vrtnu-api/graphql{"" if access_token else "/public"}/v1',
display_id, 'Downloading asset JSON', 'Unable to download asset JSON',
data=json.dumps({
'operationName': 'VideoPage',
'query': self._VIDEO_PAGE_QUERY,
'variables': {'pageId': urllib.parse.urlparse(url).path},
}).encode(),
headers=filter_dict({
'Authorization': f'Bearer {access_token}' if access_token else None,
'Content-Type': 'application/json',
'x-vrt-client-name': 'WEB',
'x-vrt-client-version': '1.5.9',
'x-vrt-zone': 'default',
}))['data']['page']
vrtnutoken = self._download_json(
'https://token.vrt.be/refreshtoken', video_id, note='Retrieving vrtnutoken',
errnote='Token refresh failed')['vrtnutoken'] if self._authenticated else None
video_id = metadata['player']['modes'][0]['streamId']
video_info = self._call_api(video_id, 'vrtnu-web@PROD', vrtnutoken)
try:
streaming_info = self._call_api(video_id, 'vrtnu-web@PROD', id_token=video_token)
except ExtractorError as e:
if not video_token and isinstance(e.cause, HTTPError) and e.cause.status == 404:
self.raise_login_required()
raise
if 'title' not in video_info:
code = video_info.get('code')
if code in ('AUTHENTICATION_REQUIRED', 'CONTENT_IS_AGE_RESTRICTED'):
self.raise_login_required(code, method='password')
elif code in ('INVALID_LOCATION', 'CONTENT_AVAILABLE_ONLY_IN_BE'):
formats, subtitles = self._extract_formats_and_subtitles(streaming_info, video_id)
code = traverse_obj(streaming_info, ('code', {str}))
if not formats and code:
if code in ('CONTENT_AVAILABLE_ONLY_FOR_BE_RESIDENTS', 'CONTENT_AVAILABLE_ONLY_IN_BE', 'CONTENT_UNAVAILABLE_VIA_PROXY'):
self.raise_geo_restricted(countries=['BE'])
elif code == 'CONTENT_AVAILABLE_ONLY_FOR_BE_RESIDENTS_AND_EXPATS':
if not self._authenticated:
self.raise_login_required(code, method='password')
self.raise_geo_restricted(countries=['BE'])
raise ExtractorError(code, expected=True)
formats, subtitles = self._extract_formats_and_subtitles(video_info, video_id)
elif code in ('CONTENT_AVAILABLE_ONLY_FOR_BE_RESIDENTS_AND_EXPATS', 'CONTENT_IS_AGE_RESTRICTED', 'CONTENT_REQUIRES_AUTHENTICATION'):
self.raise_login_required()
else:
self.raise_no_formats(f'Unable to extract formats: {code}')
return {
**traverse_obj(details, {
'title': 'title',
'description': ('description', {clean_html}),
'timestamp': ('data', 'episode', 'onTime', 'raw', {parse_iso8601}),
'release_timestamp': ('data', 'episode', 'onTime', 'raw', {parse_iso8601}),
'series': ('data', 'program', 'title'),
'season': ('data', 'season', 'title', 'value'),
'season_number': ('data', 'season', 'title', 'raw', {int_or_none}),
'season_id': ('data', 'season', 'id', {str_or_none}),
'episode': ('data', 'episode', 'number', 'value', {str_or_none}),
'episode_number': ('data', 'episode', 'number', 'raw', {int_or_none}),
'episode_id': ('data', 'episode', 'id', {str_or_none}),
'age_limit': ('data', 'episode', 'age', 'raw', {parse_age_limit}),
}),
'duration': float_or_none(streaming_info.get('duration'), 1000),
'thumbnail': url_or_none(streaming_info.get('posterImageUrl')),
**self._json_ld(traverse_obj(metadata, ('ldjson', ..., {json.loads})), video_id, fatal=False),
**traverse_obj(metadata, ('episode', {
'title': ('title', {str}),
'description': ('description', {str}),
'timestamp': ('onTimeRaw', {parse_iso8601}),
'series': ('program', 'title', {str}),
'season': ('season', 'titleRaw', {str}),
'season_number': ('season', 'titleRaw', {int_or_none}),
'season_id': ('id', {str_or_none}),
'episode': ('title', {str}),
'episode_number': ('episodeNumberRaw', {int_or_none}),
'episode_id': ('id', {str_or_none}),
'age_limit': ('ageRaw', {parse_age_limit}),
'channel': ('brand', {str}),
'duration': ('durationRaw', {parse_duration}),
})),
'id': video_id,
'display_id': display_id,
'channel': 'VRT',
'formats': formats,
'duration': float_or_none(video_info.get('duration'), 1000),
'thumbnail': url_or_none(video_info.get('posterImageUrl')),
'subtitles': subtitles,
'_old_archive_ids': [make_archive_id('Canvas', video_id)],
}
class KetnetIE(VRTBaseIE):
_VALID_URL = r'https?://(?:www\.)?ketnet\.be/(?P<id>(?:[^/]+/)*[^/?#&]+)'
_TESTS = [{
'url': 'https://www.ketnet.be/kijken/m/meisjes/6/meisjes-s6a5',
'info_dict': {
'id': 'pbs-pub-39f8351c-a0a0-43e6-8394-205d597d6162$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e',
'ext': 'mp4',
'title': 'Meisjes',
'episode': 'Reeks 6: Week 5',
'season': 'Reeks 6',
'series': 'Meisjes',
'timestamp': 1685251800,
'upload_date': '20230528',
},
'params': {'skip_download': 'm3u8'},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
video = self._download_json(
'https://senior-bff.ketnet.be/graphql', display_id, query={
'query': '''{
video(id: "content/ketnet/nl/%s.model.json") {
description
episodeNr
imageUrl
mediaReference
programTitle
publicationDate
seasonTitle
subtitleVideodetail
titleVideodetail
}
}''' % display_id, # noqa: UP031
})['data']['video']
video_id = urllib.parse.unquote(video['mediaReference'])
data = self._call_api(video_id, 'ketnet@PROD', version='v1')
formats, subtitles = self._extract_formats_and_subtitles(data, video_id)
return {
'id': video_id,
'formats': formats,
'subtitles': subtitles,
'_old_archive_ids': [make_archive_id('Canvas', video_id)],
**traverse_obj(video, {
'title': ('titleVideodetail', {str}),
'description': ('description', {str}),
'thumbnail': ('thumbnail', {url_or_none}),
'timestamp': ('publicationDate', {parse_iso8601}),
'series': ('programTitle', {str}),
'season': ('seasonTitle', {str}),
'episode': ('subtitleVideodetail', {str}),
'episode_number': ('episodeNr', {int_or_none}),
}),
'_old_archive_ids': [make_archive_id('Canvas', video_id),
make_archive_id('Ketnet', video_id)],
}

View File

@@ -109,7 +109,7 @@ class WeiboBaseIE(InfoExtractor):
**traverse_obj(video_info, {
'display_id': ('mblogid', {str_or_none}),
'title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'),
{lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}, filter),
{lambda x: x.replace('\n', ' ')}, {truncate_string(left=72)}, filter),
'alt_title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'), {str}, filter),
'description': ('text_raw', {str}),
'duration': ('page_info', 'media_info', 'duration', {int_or_none}),
@@ -213,6 +213,7 @@ class WeiboVideoIE(WeiboBaseIE):
'ext': 'mp4',
'display_id': 'LEZDodaiW',
'title': '稍微了解了一下靡烟miya感觉这东西也太二了',
'alt_title': '稍微了解了一下靡烟miya感觉这东西也太二了',
'description': '稍微了解了一下靡烟miya感觉这东西也太二了 http://t.cn/A6aerGsM \u200b\u200b\u200b',
'duration': 76,
'timestamp': 1659344278,
@@ -224,6 +225,7 @@ class WeiboVideoIE(WeiboBaseIE):
'view_count': int,
'like_count': int,
'repost_count': int,
'_old_archive_ids': ['weibomobile 4797700463137878'],
},
}]

View File

@@ -11,7 +11,7 @@ from ..utils import (
)
class WykopBaseExtractor(InfoExtractor):
class WykopBaseIE(InfoExtractor):
def _get_token(self, force_refresh=False):
if not force_refresh:
maybe_cached = self.cache.load('wykop', 'bearer')
@@ -72,7 +72,7 @@ class WykopBaseExtractor(InfoExtractor):
}
class WykopDigIE(WykopBaseExtractor):
class WykopDigIE(WykopBaseIE):
IE_NAME = 'wykop:dig'
_VALID_URL = r'https?://(?:www\.)?wykop\.pl/link/(?P<id>\d+)'
@@ -128,7 +128,7 @@ class WykopDigIE(WykopBaseExtractor):
}
class WykopDigCommentIE(WykopBaseExtractor):
class WykopDigCommentIE(WykopBaseIE):
IE_NAME = 'wykop:dig:comment'
_VALID_URL = r'https?://(?:www\.)?wykop\.pl/link/(?P<dig_id>\d+)/[^/]+/komentarz/(?P<id>\d+)'
@@ -177,7 +177,7 @@ class WykopDigCommentIE(WykopBaseExtractor):
}
class WykopPostIE(WykopBaseExtractor):
class WykopPostIE(WykopBaseIE):
IE_NAME = 'wykop:post'
_VALID_URL = r'https?://(?:www\.)?wykop\.pl/wpis/(?P<id>\d+)'
@@ -228,7 +228,7 @@ class WykopPostIE(WykopBaseExtractor):
}
class WykopPostCommentIE(WykopBaseExtractor):
class WykopPostCommentIE(WykopBaseIE):
IE_NAME = 'wykop:post:comment'
_VALID_URL = r'https?://(?:www\.)?wykop\.pl/wpis/(?P<post_id>\d+)/[^/#]+#(?P<id>\d+)'

View File

@@ -2,15 +2,17 @@ import itertools
from .common import InfoExtractor
from ..utils import (
bug_reports_message,
determine_ext,
extract_attributes,
int_or_none,
lowercase_escape,
parse_qs,
traverse_obj,
qualities,
try_get,
update_url_query,
url_or_none,
)
from ..utils.traversal import traverse_obj
class YandexVideoIE(InfoExtractor):
@@ -186,7 +188,22 @@ class YandexVideoPreviewIE(InfoExtractor):
return self.url_result(data_json['video']['url'])
class ZenYandexIE(InfoExtractor):
class ZenYandexBaseIE(InfoExtractor):
def _fetch_ssr_data(self, url, video_id):
webpage = self._download_webpage(url, video_id)
redirect = self._search_json(
r'(?:var|let|const)\s+it\s*=', webpage, 'redirect', video_id, default={}).get('retpath')
if redirect:
video_id = self._match_id(redirect)
webpage = self._download_webpage(redirect, video_id, note='Redirecting')
return video_id, self._search_json(
r'(?:var|let|const)\s+_params\s*=\s*\(', webpage, 'metadata', video_id,
contains_pattern=r'{["\']ssrData.+}')['ssrData']
class ZenYandexIE(ZenYandexBaseIE):
IE_NAME = 'dzen.ru'
IE_DESC = 'Дзен (dzen) formerly Яндекс.Дзен (Yandex Zen)'
_VALID_URL = r'https?://(zen\.yandex|dzen)\.ru(?:/video)?/(media|watch)/(?:(?:id/[^/]+/|[^/]+/)(?:[a-z0-9-]+)-)?(?P<id>[a-z0-9-]+)'
_TESTS = [{
'url': 'https://zen.yandex.ru/media/id/606fd806cc13cb3c58c05cf5/vot-eto-focus-dedy-morozy-na-gidrociklah-60c7c443da18892ebfe85ed7',
@@ -216,6 +233,7 @@ class ZenYandexIE(InfoExtractor):
'timestamp': 1573465585,
},
'params': {'skip_download': 'm3u8'},
'skip': 'The page does not exist',
}, {
'url': 'https://zen.yandex.ru/video/watch/6002240ff8b1af50bb2da5e3',
'info_dict': {
@@ -227,6 +245,9 @@ class ZenYandexIE(InfoExtractor):
'uploader': 'TechInsider',
'timestamp': 1611378221,
'upload_date': '20210123',
'view_count': int,
'duration': 243,
'tags': ['опыт', 'эксперимент', 'огонь'],
},
'params': {'skip_download': 'm3u8'},
}, {
@@ -240,6 +261,9 @@ class ZenYandexIE(InfoExtractor):
'uploader': 'TechInsider',
'upload_date': '20210123',
'timestamp': 1611378221,
'view_count': int,
'duration': 243,
'tags': ['опыт', 'эксперимент', 'огонь'],
},
'params': {'skip_download': 'm3u8'},
}, {
@@ -252,44 +276,56 @@ class ZenYandexIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
redirect = self._search_json(r'var it\s*=', webpage, 'redirect', id, default={}).get('retpath')
if redirect:
video_id = self._match_id(redirect)
webpage = self._download_webpage(redirect, video_id, note='Redirecting')
data_json = self._search_json(
r'("data"\s*:|data\s*=)', webpage, 'metadata', video_id, contains_pattern=r'{["\']_*serverState_*video.+}')
serverstate = self._search_regex(r'(_+serverState_+video-site_[^_]+_+)', webpage, 'server state')
uploader = self._search_regex(r'(<a\s*class=["\']card-channel-link[^"\']+["\'][^>]+>)',
webpage, 'uploader', default='<a>')
uploader_name = extract_attributes(uploader).get('aria-label')
item_id = traverse_obj(data_json, (serverstate, 'videoViewer', 'openedItemId', {str}))
video_json = traverse_obj(data_json, (serverstate, 'videoViewer', 'items', item_id, {dict})) or {}
video_id, ssr_data = self._fetch_ssr_data(url, video_id)
video_data = ssr_data['videoMetaResponse']
formats, subtitles = [], {}
for s_url in traverse_obj(video_json, ('video', 'streams', ..., {url_or_none})):
quality = qualities(('4', '0', '1', '2', '3', '5', '6', '7'))
# Deduplicate stream URLs. The "dzen_dash" query parameter is present in some URLs but can be omitted
stream_urls = set(traverse_obj(video_data, (
'video', ('id', ('streams', ...), ('mp4Streams', ..., 'url'), ('oneVideoStreams', ..., 'url')),
{url_or_none}, {update_url_query(query={'dzen_dash': []})})))
for s_url in stream_urls:
ext = determine_ext(s_url)
if ext == 'mpd':
fmts, subs = self._extract_mpd_formats_and_subtitles(s_url, video_id, mpd_id='dash')
elif ext == 'm3u8':
fmts, subs = self._extract_m3u8_formats_and_subtitles(s_url, video_id, 'mp4')
content_type = traverse_obj(parse_qs(s_url), ('ct', 0))
if ext == 'mpd' or content_type == '6':
fmts, subs = self._extract_mpd_formats_and_subtitles(s_url, video_id, mpd_id='dash', fatal=False)
elif ext == 'm3u8' or content_type == '8':
fmts, subs = self._extract_m3u8_formats_and_subtitles(s_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
elif content_type == '0':
format_type = traverse_obj(parse_qs(s_url), ('type', 0))
formats.append({
'url': s_url,
'format_id': format_type,
'ext': 'mp4',
'quality': quality(format_type),
})
continue
else:
self.report_warning(f'Unsupported stream URL: {s_url}{bug_reports_message()}')
continue
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
self._merge_subtitles(subs, target=subtitles)
return {
'id': video_id,
'title': video_json.get('title') or self._og_search_title(webpage),
'formats': formats,
'subtitles': subtitles,
'duration': int_or_none(video_json.get('duration')),
'view_count': int_or_none(video_json.get('views')),
'timestamp': int_or_none(video_json.get('publicationDate')),
'uploader': uploader_name or data_json.get('authorName') or try_get(data_json, lambda x: x['publisher']['name']),
'description': video_json.get('description') or self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage) or try_get(data_json, lambda x: x['og']['imageUrl']),
**traverse_obj(video_data, {
'title': ('title', {str}),
'description': ('description', {str}),
'thumbnail': ('image', {url_or_none}),
'duration': ('video', 'duration', {int_or_none}),
'view_count': ('video', 'views', {int_or_none}),
'timestamp': ('publicationDate', {int_or_none}),
'tags': ('tags', ..., {str}),
'uploader': ('source', 'title', {str}),
}),
}
class ZenYandexChannelIE(InfoExtractor):
class ZenYandexChannelIE(ZenYandexBaseIE):
IE_NAME = 'dzen.ru:channel'
_VALID_URL = r'https?://(zen\.yandex|dzen)\.ru/(?!media|video)(?:id/)?(?P<id>[a-z0-9-_]+)'
_TESTS = [{
'url': 'https://zen.yandex.ru/tok_media',
@@ -323,8 +359,8 @@ class ZenYandexChannelIE(InfoExtractor):
'url': 'https://zen.yandex.ru/jony_me',
'info_dict': {
'id': 'jony_me',
'description': 'md5:ce0a5cad2752ab58701b5497835b2cc5',
'title': 'JONY ',
'description': 'md5:7c30d11dc005faba8826feae99da3113',
'title': 'JONY',
},
'playlist_count': 18,
}, {
@@ -333,9 +369,8 @@ class ZenYandexChannelIE(InfoExtractor):
'url': 'https://zen.yandex.ru/tatyanareva',
'info_dict': {
'id': 'tatyanareva',
'description': 'md5:40a1e51f174369ec3ba9d657734ac31f',
'description': 'md5:92e56fa730a932ca2483ba5c2186ad96',
'title': 'Татьяна Рева',
'entries': 'maxcount:200',
},
'playlist_mincount': 46,
}, {
@@ -348,43 +383,31 @@ class ZenYandexChannelIE(InfoExtractor):
'playlist_mincount': 657,
}]
def _entries(self, item_id, server_state_json, server_settings_json):
items = (traverse_obj(server_state_json, ('feed', 'items', ...))
or traverse_obj(server_settings_json, ('exportData', 'items', ...)))
more = (traverse_obj(server_state_json, ('links', 'more'))
or traverse_obj(server_settings_json, ('exportData', 'more', 'link')))
def _entries(self, feed_data, channel_id):
next_page_id = None
for page in itertools.count(1):
for item in items or []:
if item.get('type') != 'gif':
continue
video_id = traverse_obj(item, 'publication_id', 'publicationId') or ''
yield self.url_result(item['link'], ZenYandexIE, video_id.split(':')[-1])
for item in traverse_obj(feed_data, (
(None, ('items', lambda _, v: v['tab'] in ('shorts', 'longs'))),
'items', lambda _, v: url_or_none(v['link']),
)):
yield self.url_result(item['link'], ZenYandexIE, item.get('id'), title=item.get('title'))
more = traverse_obj(feed_data, ('more', 'link', {url_or_none}))
current_page_id = next_page_id
next_page_id = traverse_obj(parse_qs(more), ('next_page_id', -1))
if not all((more, items, next_page_id, next_page_id != current_page_id)):
if not all((more, next_page_id, next_page_id != current_page_id)):
break
data = self._download_json(more, item_id, note=f'Downloading Page {page}')
items, more = data.get('items'), traverse_obj(data, ('more', 'link'))
feed_data = self._download_json(more, channel_id, note=f'Downloading Page {page}')
def _real_extract(self, url):
item_id = self._match_id(url)
webpage = self._download_webpage(url, item_id)
redirect = self._search_json(
r'var it\s*=', webpage, 'redirect', item_id, default={}).get('retpath')
if redirect:
item_id = self._match_id(redirect)
webpage = self._download_webpage(redirect, item_id, note='Redirecting')
data = self._search_json(
r'("data"\s*:|data\s*=)', webpage, 'channel data', item_id, contains_pattern=r'{\"__serverState__.+}')
server_state_json = traverse_obj(data, lambda k, _: k.startswith('__serverState__'), get_all=False)
server_settings_json = traverse_obj(data, lambda k, _: k.startswith('__serverSettings__'), get_all=False)
channel_id = self._match_id(url)
channel_id, ssr_data = self._fetch_ssr_data(url, channel_id)
channel_data = ssr_data['exportResponse']
return self.playlist_result(
self._entries(item_id, server_state_json, server_settings_json),
item_id, traverse_obj(server_state_json, ('channel', 'source', 'title')),
traverse_obj(server_state_json, ('channel', 'source', 'description')))
self._entries(channel_data['feedData'], channel_id),
channel_id, **traverse_obj(channel_data, ('channel', 'source', {
'title': ('title', {str}),
'description': ('description', {str}),
})))

View File

@@ -227,7 +227,7 @@ class YouPornIE(InfoExtractor):
return result
class YouPornListBase(InfoExtractor):
class YouPornListBaseIE(InfoExtractor):
def _get_next_url(self, url, pl_id, html):
return urljoin(url, self._search_regex(
r'''<a [^>]*?\bhref\s*=\s*("|')(?P<url>(?:(?!\1)[^>])+)\1''',
@@ -284,7 +284,7 @@ class YouPornListBase(InfoExtractor):
playlist_id=pl_id, playlist_title=title)
class YouPornCategoryIE(YouPornListBase):
class YouPornCategoryIE(YouPornListBaseIE):
IE_DESC = 'YouPorn category, with sorting, filtering and pagination'
_VALID_URL = r'''(?x)
https?://(?:www\.)?youporn\.com/
@@ -319,7 +319,7 @@ class YouPornCategoryIE(YouPornListBase):
}]
class YouPornChannelIE(YouPornListBase):
class YouPornChannelIE(YouPornListBaseIE):
IE_DESC = 'YouPorn channel, with sorting and pagination'
_VALID_URL = r'''(?x)
https?://(?:www\.)?youporn\.com/
@@ -349,7 +349,7 @@ class YouPornChannelIE(YouPornListBase):
return re.sub(r'_', ' ', title_slug).title()
class YouPornCollectionIE(YouPornListBase):
class YouPornCollectionIE(YouPornListBaseIE):
IE_DESC = 'YouPorn collection (user playlist), with sorting and pagination'
_VALID_URL = r'''(?x)
https?://(?:www\.)?youporn\.com/
@@ -394,7 +394,7 @@ class YouPornCollectionIE(YouPornListBase):
return playlist
class YouPornTagIE(YouPornListBase):
class YouPornTagIE(YouPornListBaseIE):
IE_DESC = 'YouPorn tag (porntags), with sorting, filtering and pagination'
_VALID_URL = r'''(?x)
https?://(?:www\.)?youporn\.com/
@@ -442,7 +442,7 @@ class YouPornTagIE(YouPornListBase):
return super()._real_extract(url)
class YouPornStarIE(YouPornListBase):
class YouPornStarIE(YouPornListBaseIE):
IE_DESC = 'YouPorn Pornstar, with description, sorting and pagination'
_VALID_URL = r'''(?x)
https?://(?:www\.)?youporn\.com/
@@ -493,7 +493,7 @@ class YouPornStarIE(YouPornListBase):
}
class YouPornVideosIE(YouPornListBase):
class YouPornVideosIE(YouPornListBaseIE):
IE_DESC = 'YouPorn video (browse) playlists, with sorting, filtering and pagination'
_VALID_URL = r'''(?x)
https?://(?:www\.)?youporn\.com/

View File

@@ -0,0 +1,50 @@
# flake8: noqa: F401
from ._base import YoutubeBaseInfoExtractor
from ._clip import YoutubeClipIE
from ._mistakes import YoutubeTruncatedIDIE, YoutubeTruncatedURLIE
from ._notifications import YoutubeNotificationsIE
from ._redirect import (
YoutubeConsentRedirectIE,
YoutubeFavouritesIE,
YoutubeFeedsInfoExtractor,
YoutubeHistoryIE,
YoutubeLivestreamEmbedIE,
YoutubeRecommendedIE,
YoutubeShortsAudioPivotIE,
YoutubeSubscriptionsIE,
YoutubeWatchLaterIE,
YoutubeYtBeIE,
YoutubeYtUserIE,
)
from ._search import YoutubeMusicSearchURLIE, YoutubeSearchDateIE, YoutubeSearchIE, YoutubeSearchURLIE
from ._tab import YoutubePlaylistIE, YoutubeTabBaseInfoExtractor, YoutubeTabIE
from ._video import YoutubeIE
# Hack to allow plugin overrides work
for _cls in [
YoutubeBaseInfoExtractor,
YoutubeClipIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeNotificationsIE,
YoutubeConsentRedirectIE,
YoutubeFavouritesIE,
YoutubeFeedsInfoExtractor,
YoutubeHistoryIE,
YoutubeLivestreamEmbedIE,
YoutubeRecommendedIE,
YoutubeShortsAudioPivotIE,
YoutubeSubscriptionsIE,
YoutubeWatchLaterIE,
YoutubeYtBeIE,
YoutubeYtUserIE,
YoutubeMusicSearchURLIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubePlaylistIE,
YoutubeTabBaseInfoExtractor,
YoutubeTabIE,
YoutubeIE,
]:
_cls.__module__ = 'yt_dlp.extractor.youtube'

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,66 @@
from ._tab import YoutubeTabBaseInfoExtractor
from ._video import YoutubeIE
from ...utils import ExtractorError, traverse_obj
class YoutubeClipIE(YoutubeTabBaseInfoExtractor):
    """Extracts a YouTube clip: a creator-defined section of an existing video.

    Resolves the clip page to its underlying video and returns a
    url_transparent result carrying the clip's section start/end times.
    """
    IE_NAME = 'youtube:clip'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/(?P<id>[^/?#]+)'
    _TESTS = [{
        # FIXME: Other metadata should be extracted from the clip, not from the base video
        'url': 'https://www.youtube.com/clip/UgytZKpehg-hEMBSn3F4AaABCQ',
        'info_dict': {
            'id': 'UgytZKpehg-hEMBSn3F4AaABCQ',
            'ext': 'mp4',
            'section_start': 29.0,
            'section_end': 39.7,
            'duration': 10.7,
            'age_limit': 0,
            'availability': 'public',
            'categories': ['Gaming'],
            'channel': 'Scott The Woz',
            'channel_id': 'UC4rqhyiTs7XyuODcECvuiiQ',
            'channel_url': 'https://www.youtube.com/channel/UC4rqhyiTs7XyuODcECvuiiQ',
            'description': 'md5:7a4517a17ea9b4bd98996399d8bb36e7',
            'like_count': int,
            'playable_in_embed': True,
            'tags': 'count:17',
            'thumbnail': 'https://i.ytimg.com/vi_webp/ScPX26pdQik/maxresdefault.webp',
            'title': 'Mobile Games on Console - Scott The Woz',
            'upload_date': '20210920',
            'uploader': 'Scott The Woz',
            'uploader_id': '@ScottTheWoz',
            'uploader_url': 'https://www.youtube.com/@ScottTheWoz',
            'view_count': int,
            'live_status': 'not_live',
            'channel_follower_count': int,
            'chapters': 'count:20',
            'comment_count': int,
            'heatmap': 'count:100',
        },
    }]

    def _real_extract(self, url):
        clip_id = self._match_id(url)
        _, data = self._extract_webpage(url, clip_id)

        video_id = traverse_obj(data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'))
        if not video_id:
            raise ExtractorError('Unable to find video ID')

        clip_data = traverse_obj(data, (
            'engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'clipSectionRenderer',
            'contents', ..., 'clipAttributionRenderer', 'onScrubExit', 'commandExecutorCommand', 'commands', ...,
            'openPopupAction', 'popup', 'notificationActionRenderer', 'actionButton', 'buttonRenderer', 'command',
            'commandExecutorCommand', 'commands', ..., 'loopCommand'), get_all=False)
        # Fail with a clear message instead of an opaque TypeError/KeyError when
        # the clip section metadata is absent from the page response
        if not clip_data or 'startTimeMs' not in clip_data or 'endTimeMs' not in clip_data:
            raise ExtractorError('Unable to extract clip section times')

        return {
            '_type': 'url_transparent',
            'url': f'https://www.youtube.com/watch?v={video_id}',
            'ie_key': YoutubeIE.ie_key(),
            'id': clip_id,
            'section_start': int(clip_data['startTimeMs']) / 1000,
            'section_end': int(clip_data['endTimeMs']) / 1000,
            '_format_sort_fields': (  # https protocol is prioritized for ffmpeg compatibility
                'proto:https', 'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec', 'channels', 'acodec', 'lang'),
        }

View File

@@ -0,0 +1,69 @@
from ._base import YoutubeBaseInfoExtractor
from ...utils import ExtractorError
class YoutubeTruncatedURLIE(YoutubeBaseInfoExtractor):
    # Catch-all for watch/attribution_link URLs with no video ID at all,
    # which typically happens when an unquoted '&' was eaten by the shell.
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Always an error: explain the likely shell-quoting mistake to the user
        quoting_hint = (
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like yt-dlp '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply yt-dlp BaW_jenozKc .')
        raise ExtractorError(quoting_hint, expected=True)
class YoutubeTruncatedIDIE(YoutubeBaseInfoExtractor):
    # Matches watch URLs whose video ID is shorter than the required 11 characters.
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        truncated_id = self._match_id(url)
        message = f'Incomplete YouTube ID {truncated_id}. URL {url} looks truncated.'
        raise ExtractorError(message, expected=True)

View File

@@ -0,0 +1,98 @@
import itertools
import re
from ._tab import YoutubeTabBaseInfoExtractor, YoutubeTabIE
from ._video import YoutubeIE
from ...utils import traverse_obj
class YoutubeNotificationsIE(YoutubeTabBaseInfoExtractor):
    """Extracts the logged-in user's notification feed via the ':ytnotif' keyword."""
    IE_NAME = 'youtube:notif'
    IE_DESC = 'YouTube notifications; ":ytnotif" keyword (requires cookies)'
    _VALID_URL = r':ytnotif(?:ication)?s?'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': ':ytnotif',
        'only_matching': True,
    }, {
        'url': ':ytnotifications',
        'only_matching': True,
    }]
    def _extract_notification_menu(self, response, continuation_list):
        """Yield entries from one notification-menu API response.

        ``continuation_list`` is a single-element list used as an out
        parameter: slot 0 is set to the continuation renderer for the next
        page, or to None when no further pages exist.
        """
        notification_list = traverse_obj(
            response,
            # first path matches the initial popup response; second matches
            # continuation (appended-items) responses
            ('actions', 0, 'openPopupAction', 'popup', 'multiPageMenuRenderer', 'sections', 0, 'multiPageMenuNotificationSectionRenderer', 'items'),
            ('actions', 0, 'appendContinuationItemsAction', 'continuationItems'),
            expected_type=list) or []
        continuation_list[0] = None
        for item in notification_list:
            entry = self._extract_notification_renderer(item.get('notificationRenderer'))
            if entry:
                yield entry
            continuation = item.get('continuationItemRenderer')
            if continuation:
                continuation_list[0] = continuation
    def _extract_notification_renderer(self, notification):
        """Build an info-dict entry from a single notificationRenderer, or return None."""
        video_id = traverse_obj(
            notification, ('navigationEndpoint', 'watchEndpoint', 'videoId'), expected_type=str)
        url = f'https://www.youtube.com/watch?v={video_id}'
        channel_id = None
        if not video_id:
            # no watch endpoint: try to resolve the notification as a community post
            browse_ep = traverse_obj(
                notification, ('navigationEndpoint', 'browseEndpoint'), expected_type=dict)
            channel_id = self.ucid_or_none(traverse_obj(browse_ep, 'browseId', expected_type=str))
            post_id = self._search_regex(
                r'/post/(.+)', traverse_obj(browse_ep, 'canonicalBaseUrl', expected_type=str),
                'post id', default=None)
            if not channel_id or not post_id:
                return
            # The direct /post url redirects to this in the browser
            url = f'https://www.youtube.com/channel/{channel_id}/community?lb={post_id}'
        channel = traverse_obj(
            notification, ('contextualMenu', 'menuRenderer', 'items', 1, 'menuServiceItemRenderer', 'text', 'runs', 1, 'text'),
            expected_type=str)
        notification_title = self._get_text(notification, 'shortMessage')
        if notification_title:
            notification_title = notification_title.replace('\xad', '') # remove soft hyphens
        # TODO: handle recommended videos
        title = self._search_regex(
            rf'{re.escape(channel or "")}[^:]+: (.+)', notification_title,
            'video title', default=None)
        # approximate timestamps are opt-in via the 'approximate_date' extractor argument
        timestamp = (self._parse_time_text(self._get_text(notification, 'sentTimeText'))
                     if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE)
                     else None)
        return {
            '_type': 'url',
            'url': url,
            'ie_key': (YoutubeIE if video_id else YoutubeTabIE).ie_key(),
            'video_id': video_id,
            'title': title,
            'channel_id': channel_id,
            'channel': channel,
            'uploader': channel,
            'thumbnails': self._extract_thumbnails(notification, 'videoThumbnail'),
            'timestamp': timestamp,
        }
    def _notification_menu_entries(self, ytcfg):
        """Lazily page through the notification menu, yielding all entries."""
        continuation_list = [None]
        response = None
        for page in itertools.count(1):
            # ctoken is None on the first request; afterwards it comes from the
            # continuation renderer stashed by _extract_notification_menu
            ctoken = traverse_obj(
                continuation_list, (0, 'continuationEndpoint', 'getNotificationMenuEndpoint', 'ctoken'), expected_type=str)
            response = self._extract_response(
                item_id=f'page {page}', query={'ctoken': ctoken} if ctoken else {}, ytcfg=ytcfg,
                ep='notification/get_notification_menu', check_get_keys='actions',
                headers=self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response)))
            yield from self._extract_notification_menu(response, continuation_list)
            if not continuation_list[0]:
                break
    def _real_extract(self, url):
        display_id = 'notifications'
        ytcfg = self._download_ytcfg('web', display_id) if not self.skip_webpage else {}
        self._report_playlist_authcheck(ytcfg)
        return self.playlist_result(self._notification_menu_entries(ytcfg), display_id, display_id)

View File

@@ -0,0 +1,247 @@
import base64
import urllib.parse
from ._base import YoutubeBaseInfoExtractor
from ._tab import YoutubeTabIE
from ...utils import ExtractorError, classproperty, parse_qs, update_url_query, url_or_none
class YoutubeYtBeIE(YoutubeBaseInfoExtractor):
    # Handles youtu.be short links that also carry a playlist; delegates to
    # the canonical watch URL handled by YoutubeTabIE.
    IE_DESC = 'youtu.be'
    _VALID_URL = rf'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{{11}})/*?.*?\blist=(?P<playlist_id>{YoutubeBaseInfoExtractor._PLAYLIST_ID_RE})'
    _TESTS = [{
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': '@backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/@backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
            'age_limit': 0,
            'playable_in_embed': True,
            'thumbnail': r're:^https?://.*\.webp',
            'channel': 'Backus-Page House Museum',
            'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
            'live_status': 'not_live',
            'view_count': int,
            'channel_url': 'https://www.youtube.com/channel/UCEfMCQ9bs3tjvjy1s451zaw',
            'availability': 'public',
            'duration': 59,
            'comment_count': int,
            'channel_follower_count': int,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id, playlist_id = self._match_valid_url(url).group('id', 'playlist_id')
        watch_url = update_url_query('https://www.youtube.com/watch', {
            'v': video_id,
            'list': playlist_id,
            'feature': 'youtu.be',
        })
        return self.url_result(watch_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeLivestreamEmbedIE(YoutubeBaseInfoExtractor):
    # Rewrites embedded live-stream URLs to the channel's /live page.
    IE_DESC = 'YouTube livestream embeds'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
    _TESTS = [{
        'url': 'https://www.youtube.com/embed/live_stream?channel=UC2_KI6RB__jGdlnK6dvFEZA',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        live_url = f'https://www.youtube.com/channel/{channel_id}/live'
        return self.url_result(live_url, ie=YoutubeTabIE.ie_key(), video_id=channel_id)
class YoutubeYtUserIE(YoutubeBaseInfoExtractor):
    # Expands the legacy 'ytuser:NAME' shorthand to the /user/ channel page.
    IE_DESC = 'YouTube user videos; "ytuser:" prefix'
    IE_NAME = 'youtube:user'
    _VALID_URL = r'ytuser:(?P<id>.+)'
    _TESTS = [{
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)
        user_url = f'https://www.youtube.com/user/{user_id}'
        return self.url_result(user_url, YoutubeTabIE, user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    # Redirects ':ytfav' to the auto-generated "Liked videos" playlist.
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
    _VALID_URL = r':ytfav(?:ou?rite)?s?'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': ':ytfav',
        'only_matching': True,
    }, {
        'url': ':ytfavorites',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # 'LL' is the logged-in user's liked-videos list
        liked_playlist = 'https://www.youtube.com/playlist?list=LL'
        return self.url_result(liked_playlist, ie=YoutubeTabIE.ie_key())
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for feed extractors
    Subclasses must re-define the _FEED_NAME property.
    """
    _LOGIN_REQUIRED = True
    _FEED_NAME = 'feeds'

    @classproperty
    def IE_NAME(cls):
        # e.g. _FEED_NAME = 'history' -> 'youtube:history'
        return 'youtube:' + cls._FEED_NAME

    def _real_extract(self, url):
        feed_url = f'https://www.youtube.com/feed/{self._FEED_NAME}'
        return self.url_result(feed_url, ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(YoutubeBaseInfoExtractor):
    # Redirects ':ytwatchlater' to the auto-generated "Watch later" playlist.
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
    _VALID_URL = r':ytwatchlater'
    _TESTS = [{
        'url': ':ytwatchlater',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # 'WL' is the logged-in user's watch-later list
        watch_later = 'https://www.youtube.com/playlist?list=WL'
        return self.url_result(watch_later, ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Also matches the bare youtube.com homepage, which shows recommendations.
    # Feed-URL handling is inherited from YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _LOGIN_REQUIRED = False  # recommendations are available without cookies
    _TESTS = [{
        'url': ':ytrec',
        'only_matching': True,
    }, {
        'url': ':ytrecommended',
        'only_matching': True,
    }, {
        'url': 'https://youtube.com',
        'only_matching': True,
    }]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Feed-URL handling is inherited from YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
    _VALID_URL = r':ytsub(?:scription)?s?'
    _FEED_NAME = 'subscriptions'
    _TESTS = [{
        'url': ':ytsubs',
        'only_matching': True,
    }, {
        'url': ':ytsubscriptions',
        'only_matching': True,
    }]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Feed-URL handling is inherited from YoutubeFeedsInfoExtractor.
    IE_DESC = 'Youtube watch history; ":ythis" keyword (requires cookies)'
    _VALID_URL = r':ythis(?:tory)?'
    _FEED_NAME = 'history'
    _TESTS = [{
        'url': ':ythistory',
        'only_matching': True,
    }]
class YoutubeShortsAudioPivotIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube Shorts audio pivot (Shorts using audio of a given video)'
    IE_NAME = 'youtube:shorts:pivot:audio'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/source/(?P<id>[\w-]{11})/shorts'
    _TESTS = [{
        'url': 'https://www.youtube.com/source/Lyj-MZSAA9o/shorts',
        'only_matching': True,
    }]

    @staticmethod
    def _generate_audio_pivot_params(video_id):
        """
        Generates sfv_audio_pivot browse params for this video id
        """
        encoded_id = video_id.encode()
        # Protobuf template that embeds the video id three times
        pb_params = b'\xf2\x05+\n)\x12\'\n\x0b%b\x12\x0b%b\x1a\x0b%b' % (encoded_id, encoded_id, encoded_id)
        return urllib.parse.quote(base64.b64encode(pb_params).decode())

    def _real_extract(self, url):
        video_id = self._match_id(url)
        browse_params = self._generate_audio_pivot_params(video_id)
        return self.url_result(
            f'https://www.youtube.com/feed/sfv_audio_pivot?bp={browse_params}',
            ie=YoutubeTabIE)
class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
    # Unwraps consent.youtube.com interstitials by following the 'continue' param.
    IE_NAME = 'youtube:consent'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://consent\.youtube\.com/m\?'
    _TESTS = [{
        'url': 'https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1',
        'info_dict': {
            'id': 'qVv6vCqciTM',
            'ext': 'mp4',
            'age_limit': 0,
            'uploader_id': '@sana_natori',
            'comment_count': int,
            'chapters': 'count:13',
            'upload_date': '20221223',
            'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
            'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
            'uploader_url': 'https://www.youtube.com/@sana_natori',
            'like_count': int,
            'release_date': '20221223',
            'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
            'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月美兎/名取さな】',
            'view_count': int,
            'playable_in_embed': True,
            'duration': 4438,
            'availability': 'public',
            'channel_follower_count': int,
            'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
            'categories': ['Entertainment'],
            'live_status': 'was_live',
            'release_timestamp': 1671793345,
            'channel': 'さなちゃんねる',
            'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
            'uploader': 'さなちゃんねる',
            'channel_is_verified': True,
            'heatmap': 'count:100',
        },
        'add_ie': ['Youtube'],
        'params': {'skip_download': 'Youtube'},
    }]

    def _real_extract(self, url):
        # Take the last 'continue' value, if any, and validate it as a URL
        continue_values = parse_qs(url).get('continue', [None])
        redirect_url = url_or_none(continue_values[-1])
        if not redirect_url:
            raise ExtractorError('Invalid cookie consent redirect URL', expected=True)
        return self.url_result(redirect_url)

View File

@@ -0,0 +1,167 @@
import urllib.parse
from ._tab import YoutubeTabBaseInfoExtractor
from ..common import SearchInfoExtractor
from ...utils import join_nonempty, parse_qs
class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
    """Handles 'ytsearchN:query' pseudo-URLs; search logic comes from the base classes."""
    IE_DESC = 'YouTube search'
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    _SEARCH_PARAMS = 'EgIQAfABAQ==' # Videos only
    _TESTS = [{
        'url': 'ytsearch5:youtube-dl test video',
        'playlist_count': 5,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        },
    }, {
        'note': 'Suicide/self-harm search warning',
        'url': 'ytsearch1:i hate myself and i wanna die',
        'playlist_count': 1,
        'info_dict': {
            'id': 'i hate myself and i wanna die',
            'title': 'i hate myself and i wanna die',
        },
    }]
class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
    """Same as YoutubeSearchIE but sorted by upload date ('ytsearchdateN:query')."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube search, newest videos first'
    _SEARCH_PARAMS = 'CAISAhAB8AEB' # Videos only, sorted by date
    _TESTS = [{
        'url': 'ytsearchdate5:youtube-dl test video',
        'playlist_count': 5,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        },
    }]
class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
    IE_DESC = 'YouTube search URLs with sorting and filter support'
    IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:results|search)\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'youtube-dl test video',
            'title': 'youtube-dl test video',
        },
    }, {
        'url': 'https://www.youtube.com/results?search_query=python&sp=EgIQAg%253D%253D',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'python',
            'title': 'python',
        },
    }, {
        'url': 'https://www.youtube.com/results?search_query=%23cats',
        'playlist_mincount': 1,
        'info_dict': {
            'id': '#cats',
            'title': '#cats',
            # The test suite does not have support for nested playlists
            # 'entries': [{
            #     'url': r're:https://(www\.)?youtube\.com/hashtag/cats',
            #     'title': '#cats',
            # }],
        },
    }, {
        # Channel results
        'url': 'https://www.youtube.com/results?search_query=kurzgesagt&sp=EgIQAg%253D%253D',
        'info_dict': {
            'id': 'kurzgesagt',
            'title': 'kurzgesagt',
        },
        'playlist': [{
            'info_dict': {
                '_type': 'url',
                'id': 'UCsXVk37bltHxD1rDPwtNM8Q',
                'url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
                'ie_key': 'YoutubeTab',
                'channel': 'Kurzgesagt In a Nutshell',
                'description': 'md5:4ae48dfa9505ffc307dad26342d06bfc',
                'title': 'Kurzgesagt In a Nutshell',
                'channel_id': 'UCsXVk37bltHxD1rDPwtNM8Q',
                # No longer available for search as it is set to the handle.
                # 'playlist_count': int,
                'channel_url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
                'thumbnails': list,
                'uploader_id': '@kurzgesagt',
                'uploader_url': 'https://www.youtube.com/@kurzgesagt',
                'uploader': 'Kurzgesagt In a Nutshell',
                'channel_is_verified': True,
                'channel_follower_count': int,
            },
        }],
        'params': {'extract_flat': True, 'playlist_items': '1'},
        'playlist_mincount': 1,
    }, {
        'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        query_params = parse_qs(url)
        # Either 'search_query' or the short 'q' parameter may carry the query
        query = (query_params.get('search_query') or query_params.get('q'))[0]
        search_params = query_params.get('sp', (None,))[0]
        return self.playlist_result(self._search_results(query, search_params), query, query)
class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
    IE_DESC = 'YouTube music search URLs with selectable sections, e.g. #songs'
    IE_NAME = 'youtube:music:search_url'
    _VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
    _TESTS = [{
        'url': 'https://music.youtube.com/search?q=royalty+free+music',
        'playlist_count': 16,
        'info_dict': {
            'id': 'royalty free music',
            'title': 'royalty free music',
        },
    }, {
        'url': 'https://music.youtube.com/search?q=royalty+free+music&sp=EgWKAQIIAWoKEAoQAxAEEAkQBQ%3D%3D',
        'playlist_mincount': 30,
        'info_dict': {
            'id': 'royalty free music - songs',
            'title': 'royalty free music - songs',
        },
        'params': {'extract_flat': 'in_playlist'},
    }, {
        'url': 'https://music.youtube.com/search?q=royalty+free+music#community+playlists',
        'playlist_mincount': 30,
        'info_dict': {
            'id': 'royalty free music - community playlists',
            'title': 'royalty free music - community playlists',
        },
        'params': {'extract_flat': 'in_playlist'},
    }]
    _SECTIONS = {
        'albums': 'EgWKAQIYAWoKEAoQAxAEEAkQBQ==',
        'artists': 'EgWKAQIgAWoKEAoQAxAEEAkQBQ==',
        'community playlists': 'EgeKAQQoAEABagoQChADEAQQCRAF',
        'featured playlists': 'EgeKAQQoADgBagwQAxAJEAQQDhAKEAU==',
        'songs': 'EgWKAQIIAWoKEAoQAxAEEAkQBQ==',
        'videos': 'EgWKAQIQAWoKEAoQAxAEEAkQBQ==',
    }

    def _real_extract(self, url):
        query_params = parse_qs(url)
        query = (query_params.get('search_query') or query_params.get('q'))[0]
        params = query_params.get('sp', (None,))[0]

        if params:
            # Map known search params back to a section name; fall back to the raw params
            section = next((name for name, sp in self._SECTIONS.items() if sp == params), params)
        else:
            # The section may instead be selected via the URL fragment, e.g. '#songs'
            fragment = url.split('#')[1] if '#' in url else ''
            section = urllib.parse.unquote_plus(fragment).lower()
            params = self._SECTIONS.get(section)
            if not params:
                section = None

        title = join_nonempty(query, section, delim=' - ')
        return self.playlist_result(
            self._search_results(query, params, default_client='web_music'), title, title)

File diff suppressed because it is too large Load Diff