Merge branch 'yt-dlp:master' into pr/live-sections
@@ -208,6 +208,10 @@ from .bandcamp import (
BandcampUserIE,
BandcampWeeklyIE,
)
from .bandlab import (
BandlabIE,
BandlabPlaylistIE,
)
from .bannedvideo import BannedVideoIE
from .bbc import (
BBCIE,
@@ -942,6 +946,10 @@ from .kaltura import KalturaIE
|
||||
from .kankanews import KankaNewsIE
|
||||
from .karaoketv import KaraoketvIE
|
||||
from .kelbyone import KelbyOneIE
|
||||
from .kenh14 import (
|
||||
Kenh14PlaylistIE,
|
||||
Kenh14VideoIE,
|
||||
)
|
||||
from .khanacademy import (
|
||||
KhanAcademyIE,
|
||||
KhanAcademyUnitIE,
|
||||
@@ -1131,12 +1139,6 @@ from .microsoftembed import (
|
||||
MicrosoftMediusIE,
|
||||
)
|
||||
from .microsoftstream import MicrosoftStreamIE
|
||||
from .mildom import (
|
||||
MildomClipIE,
|
||||
MildomIE,
|
||||
MildomUserVodIE,
|
||||
MildomVodIE,
|
||||
)
|
||||
from .minds import (
|
||||
MindsChannelIE,
|
||||
MindsGroupIE,
|
||||
@@ -1156,6 +1158,7 @@ from .mitele import MiTeleIE
|
||||
from .mixch import (
|
||||
MixchArchiveIE,
|
||||
MixchIE,
|
||||
MixchMovieIE,
|
||||
)
|
||||
from .mixcloud import (
|
||||
MixcloudIE,
|
||||
@@ -1517,8 +1520,8 @@ from .pgatour import PGATourIE
|
||||
from .philharmoniedeparis import PhilharmonieDeParisIE
|
||||
from .phoenix import PhoenixIE
|
||||
from .photobucket import PhotobucketIE
|
||||
from .pialive import PiaLiveIE
|
||||
from .piapro import PiaproIE
|
||||
from .piaulizaportal import PIAULIZAPortalIE
|
||||
from .picarto import (
|
||||
PicartoIE,
|
||||
PicartoVodIE,
|
||||
@@ -1554,10 +1557,6 @@ from .podbayfm import (
|
||||
)
|
||||
from .podchaser import PodchaserIE
|
||||
from .podomatic import PodomaticIE
|
||||
from .pokemon import (
|
||||
PokemonIE,
|
||||
PokemonWatchIE,
|
||||
)
|
||||
from .pokergo import (
|
||||
PokerGoCollectionIE,
|
||||
PokerGoIE,
|
||||
@@ -1648,6 +1647,7 @@ from .radiokapital import (
|
||||
RadioKapitalIE,
|
||||
RadioKapitalShowIE,
|
||||
)
|
||||
from .radioradicale import RadioRadicaleIE
|
||||
from .radiozet import RadioZetPodcastIE
|
||||
from .radlive import (
|
||||
RadLiveChannelIE,
|
||||
@@ -1939,9 +1939,7 @@ from .spotify import (
|
||||
)
|
||||
from .spreaker import (
|
||||
SpreakerIE,
|
||||
SpreakerPageIE,
|
||||
SpreakerShowIE,
|
||||
SpreakerShowPageIE,
|
||||
)
|
||||
from .springboardplatform import SpringboardPlatformIE
|
||||
from .sprout import SproutIE
|
||||
@@ -2252,6 +2250,10 @@ from .ufctv import (
|
||||
)
|
||||
from .ukcolumn import UkColumnIE
|
||||
from .uktvplay import UKTVPlayIE
|
||||
from .uliza import (
|
||||
UlizaPlayerIE,
|
||||
UlizaPortalIE,
|
||||
)
|
||||
from .umg import UMGDeIE
|
||||
from .unistra import UnistraIE
|
||||
from .unity import UnityIE
|
||||
@@ -2280,10 +2282,6 @@ from .utreon import UtreonIE
|
||||
from .varzesh3 import Varzesh3IE
|
||||
from .vbox7 import Vbox7IE
|
||||
from .veo import VeoIE
|
||||
from .veoh import (
|
||||
VeohIE,
|
||||
VeohUserIE,
|
||||
)
|
||||
from .vesti import VestiIE
|
||||
from .vevo import (
|
||||
VevoIE,
|
||||
|
||||
@@ -6,7 +6,6 @@ import hmac
import io
import json
import re
import struct
import time
import urllib.parse
import uuid
@@ -18,10 +17,8 @@ from ..networking.exceptions import TransportError
from ..utils import (
ExtractorError,
OnDemandPagedList,
bytes_to_intlist,
decode_base_n,
int_or_none,
intlist_to_bytes,
time_seconds,
traverse_obj,
update_url_query,
@@ -72,15 +69,15 @@ class AbemaLicenseRH(RequestHandler):
})

res = decode_base_n(license_response['k'], table=self._STRTABLE)
encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))
encvideokey = list(res.to_bytes(16, 'big'))

h = hmac.new(
binascii.unhexlify(self._HKEY),
(license_response['cid'] + self.ie._DEVICE_ID).encode(),
digestmod=hashlib.sha256)
enckey = bytes_to_intlist(h.digest())
enckey = list(h.digest())

return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey))
return bytes(aes_ecb_decrypt(encvideokey, enckey))
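Editor's aside (not part of the commit): throughout this merge the deprecated bytes_to_intlist/intlist_to_bytes helpers are replaced with the list()/bytes() built-ins. A minimal sketch of the equivalence the AbemaTV hunk relies on; the 128-bit value is a made-up placeholder:

    import struct

    res = 0x0123456789abcdef0f1e2d3c4b5a6978  # placeholder 128-bit license key
    # old helpers: bytes_to_intlist(x) == list(x), intlist_to_bytes(x) == bytes(x)
    old_key = list(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))
    new_key = list(res.to_bytes(16, 'big'))  # the replacement used above
    assert old_key == new_key
    assert bytes(new_key) == res.to_bytes(16, 'big')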
class AbemaTVBaseIE(InfoExtractor):
@@ -11,11 +11,9 @@ from ..networking.exceptions import HTTPError
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
ass_subtitles_timecode,
|
||||
bytes_to_intlist,
|
||||
bytes_to_long,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
intlist_to_bytes,
|
||||
join_nonempty,
|
||||
long_to_bytes,
|
||||
parse_iso8601,
|
||||
@@ -198,16 +196,16 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
|
||||
|
||||
links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
|
||||
self._K = ''.join(random.choices('0123456789abcdef', k=16))
|
||||
message = bytes_to_intlist(json.dumps({
|
||||
message = list(json.dumps({
|
||||
'k': self._K,
|
||||
't': token,
|
||||
}))
|
||||
}).encode())
|
||||
|
||||
# Sometimes authentication fails for no good reason, retry with
|
||||
# a different random padding
|
||||
links_data = None
|
||||
for _ in range(3):
|
||||
padded_message = intlist_to_bytes(pkcs1pad(message, 128))
|
||||
padded_message = bytes(pkcs1pad(message, 128))
|
||||
n, e = self._RSA_KEY
|
||||
encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))
|
||||
authorization = base64.b64encode(encrypted_message).decode()
|
||||
@@ -234,7 +232,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
|
||||
|
||||
error = self._parse_json(e.cause.response.read(), video_id)
|
||||
message = error.get('message')
|
||||
if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
|
||||
if e.cause.status == 403 and error.get('code') == 'player-bad-geolocation-country':
|
||||
self.raise_geo_restricted(msg=message)
|
||||
raise ExtractorError(message)
|
||||
else:
|
||||
|
||||
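Editor's aside (not part of the commit): the ADN hunk above switches e.cause.code to e.cause.status, since the HTTPError raised by yt_dlp's networking layer exposes the HTTP status as .status. A sketch of the check; the helper name is hypothetical:

    from yt_dlp.networking.exceptions import HTTPError

    def is_geo_blocked(err: HTTPError, error_json: dict) -> bool:
        # mirrors the condition in the hunk above
        return err.status == 403 and error_json.get('code') == 'player-bad-geolocation-country'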
@@ -66,6 +66,14 @@ class AfreecaTVBaseIE(InfoExtractor):
extensions={'legacy_ssl': True}), display_id,
'Downloading API JSON', 'Unable to download API JSON')

@staticmethod
def _fixup_thumb(thumb_url):
if not url_or_none(thumb_url):
return None
# Core would determine_ext as 'php' from the url, so we need to provide the real ext
# See: https://github.com/yt-dlp/yt-dlp/issues/11537
return [{'url': thumb_url, 'ext': 'jpg'}]
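Editor's aside (not part of the commit): why the explicit 'ext' is needed — a sketch using a hypothetical thumbnail URL of the shape the site serves:

    from yt_dlp.utils import determine_ext

    thumb = 'https://example.com/thumbnail.php?id=123'  # hypothetical *.php thumbnail URL
    assert determine_ext(thumb) == 'php'  # core would treat the image as a .php file
    # hence _fixup_thumb returns [{'url': thumb, 'ext': 'jpg'}] to pin the real extension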
class AfreecaTVIE(AfreecaTVBaseIE):
IE_NAME = 'soop'
@@ -155,7 +163,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'uploader': ('writer_nick', {str}),
'uploader_id': ('bj_id', {str}),
'duration': ('total_file_duration', {int_or_none(scale=1000)}),
'thumbnail': ('thumb', {url_or_none}),
'thumbnails': ('thumb', {self._fixup_thumb}),
})

entries = []
@@ -226,8 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):

return self.playlist_result(self._entries(data), video_id)

@staticmethod
def _entries(data):
def _entries(self, data):
# 'files' is always a list with 1 element
yield from traverse_obj(data, (
'data', lambda _, v: v['story_type'] == 'catch',
@@ -238,7 +245,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
'title': ('title', {str}),
'uploader': ('writer_nick', {str}),
'uploader_id': ('writer_id', {str}),
'thumbnail': ('thumb', {url_or_none}),
'thumbnails': ('thumb', {self._fixup_thumb}),
'timestamp': ('write_timestamp', {int_or_none}),
}))
@@ -8,10 +8,8 @@ import time
|
||||
from .common import InfoExtractor
|
||||
from ..aes import aes_encrypt
|
||||
from ..utils import (
|
||||
bytes_to_intlist,
|
||||
determine_ext,
|
||||
int_or_none,
|
||||
intlist_to_bytes,
|
||||
join_nonempty,
|
||||
smuggle_url,
|
||||
strip_jsonp,
|
||||
@@ -234,8 +232,8 @@ class AnvatoIE(InfoExtractor):
|
||||
server_time = self._server_time(access_key, video_id)
|
||||
input_data = f'{server_time}~{md5_text(video_data_url)}~{md5_text(server_time)}'
|
||||
|
||||
auth_secret = intlist_to_bytes(aes_encrypt(
|
||||
bytes_to_intlist(input_data[:64]), bytes_to_intlist(self._AUTH_KEY)))
|
||||
auth_secret = bytes(aes_encrypt(
|
||||
list(input_data[:64].encode()), list(self._AUTH_KEY)))
|
||||
query = {
|
||||
'X-Anvato-Adst-Auth': base64.b64encode(auth_secret).decode('ascii'),
|
||||
'rtyp': 'fp',
|
||||
|
||||
@@ -205,6 +205,26 @@ class ArchiveOrgIE(InfoExtractor):
|
||||
},
|
||||
},
|
||||
],
|
||||
}, {
|
||||
# The reviewbody is None for one of the reviews; just need to extract data without crashing
|
||||
'url': 'https://archive.org/details/gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
|
||||
'info_dict': {
|
||||
'id': 'gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
|
||||
'ext': 'mp3',
|
||||
'title': 'Stuck Inside of Mobile with the Memphis Blues Again',
|
||||
'creators': ['Grateful Dead'],
|
||||
'duration': 338.31,
|
||||
'track': 'Stuck Inside of Mobile with the Memphis Blues Again',
|
||||
'description': 'md5:764348a470b986f1217ffd38d6ac7b72',
|
||||
'display_id': 'gd95-04-02d1t04.shn',
|
||||
'location': 'Pyramid Arena',
|
||||
'uploader': 'jon@archive.org',
|
||||
'album': '1995-04-02 - Pyramid Arena',
|
||||
'upload_date': '20040519',
|
||||
'track_number': 4,
|
||||
'release_date': '19950402',
|
||||
'timestamp': 1084927901,
|
||||
},
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
@@ -335,7 +355,7 @@ class ArchiveOrgIE(InfoExtractor):
info['comments'].append({
'id': review.get('review_id'),
'author': review.get('reviewer'),
'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
'text': join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n'),
'timestamp': unified_timestamp(review.get('createdate')),
'parent': 'root'})
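Editor's aside (not part of the commit): the new archive.org test notes that reviewbody can be None; join_nonempty sidesteps the old string-concatenation crash. A small sketch:

    from yt_dlp.utils import join_nonempty

    review = {'reviewtitle': 'Great show', 'reviewbody': None}  # shape of the failing item
    # old code did str_or_none(...) + '\n\n' + review.get('reviewbody') -> TypeError on None
    assert join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n') == 'Great show'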
437 yt_dlp/extractor/bandlab.py (new file)
@@ -0,0 +1,437 @@
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
float_or_none,
|
||||
format_field,
|
||||
int_or_none,
|
||||
parse_iso8601,
|
||||
parse_qs,
|
||||
truncate_string,
|
||||
url_or_none,
|
||||
)
|
||||
from ..utils.traversal import traverse_obj, value
|
||||
|
||||
|
||||
class BandlabBaseIE(InfoExtractor):
|
||||
def _call_api(self, endpoint, asset_id, **kwargs):
|
||||
headers = kwargs.pop('headers', None) or {}
|
||||
return self._download_json(
|
||||
f'https://www.bandlab.com/api/v1.3/{endpoint}/{asset_id}',
|
||||
asset_id, headers={
|
||||
'accept': 'application/json',
|
||||
'referer': 'https://www.bandlab.com/',
|
||||
'x-client-id': 'BandLab-Web',
|
||||
'x-client-version': '10.1.124',
|
||||
**headers,
|
||||
}, **kwargs)
|
||||
|
||||
def _parse_revision(self, revision_data, url=None):
|
||||
return {
|
||||
'vcodec': 'none',
|
||||
'media_type': 'revision',
|
||||
'extractor_key': BandlabIE.ie_key(),
|
||||
'extractor': BandlabIE.IE_NAME,
|
||||
**traverse_obj(revision_data, {
|
||||
'webpage_url': (
|
||||
'id', ({value(url)}, {format_field(template='https://www.bandlab.com/revision/%s')}), filter, any),
|
||||
'id': (('revisionId', 'id'), {str}, any),
|
||||
'title': ('song', 'name', {str}),
|
||||
'track': ('song', 'name', {str}),
|
||||
'url': ('mixdown', 'file', {url_or_none}),
|
||||
'thumbnail': ('song', 'picture', 'url', {url_or_none}),
|
||||
'description': ('description', {str}),
|
||||
'uploader': ('creator', 'name', {str}),
|
||||
'uploader_id': ('creator', 'username', {str}),
|
||||
'timestamp': ('createdOn', {parse_iso8601}),
|
||||
'duration': ('mixdown', 'duration', {float_or_none}),
|
||||
'view_count': ('counters', 'plays', {int_or_none}),
|
||||
'like_count': ('counters', 'likes', {int_or_none}),
|
||||
'comment_count': ('counters', 'comments', {int_or_none}),
|
||||
'genres': ('genres', ..., 'name', {str}),
|
||||
}),
|
||||
}
|
||||
|
||||
def _parse_track(self, track_data, url=None):
|
||||
return {
|
||||
'vcodec': 'none',
|
||||
'media_type': 'track',
|
||||
'extractor_key': BandlabIE.ie_key(),
|
||||
'extractor': BandlabIE.IE_NAME,
|
||||
**traverse_obj(track_data, {
|
||||
'webpage_url': (
|
||||
'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any),
|
||||
'id': (('revisionId', 'id'), {str}, any),
|
||||
'url': ('track', 'sample', 'audioUrl', {url_or_none}),
|
||||
'title': ('track', 'name', {str}),
|
||||
'track': ('track', 'name', {str}),
|
||||
'description': ('caption', {str}),
|
||||
'thumbnail': ('track', 'picture', ('original', 'url'), {url_or_none}, any),
|
||||
'view_count': ('counters', 'plays', {int_or_none}),
|
||||
'like_count': ('counters', 'likes', {int_or_none}),
|
||||
'comment_count': ('counters', 'comments', {int_or_none}),
|
||||
'duration': ('track', 'sample', 'duration', {float_or_none}),
|
||||
'uploader': ('creator', 'name', {str}),
|
||||
'uploader_id': ('creator', 'username', {str}),
|
||||
'timestamp': ('createdOn', {parse_iso8601}),
|
||||
}),
|
||||
}
|
||||
|
||||
def _parse_video(self, video_data, url=None):
|
||||
return {
|
||||
'media_type': 'video',
|
||||
'extractor_key': BandlabIE.ie_key(),
|
||||
'extractor': BandlabIE.IE_NAME,
|
||||
**traverse_obj(video_data, {
|
||||
'id': ('id', {str}),
|
||||
'webpage_url': (
|
||||
'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any),
|
||||
'url': ('video', 'url', {url_or_none}),
|
||||
'title': ('caption', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}),
|
||||
'description': ('caption', {str}),
|
||||
'thumbnail': ('video', 'picture', 'url', {url_or_none}),
|
||||
'view_count': ('video', 'counters', 'plays', {int_or_none}),
|
||||
'like_count': ('video', 'counters', 'likes', {int_or_none}),
|
||||
'comment_count': ('counters', 'comments', {int_or_none}),
|
||||
'duration': ('video', 'duration', {float_or_none}),
|
||||
'uploader': ('creator', 'name', {str}),
|
||||
'uploader_id': ('creator', 'username', {str}),
|
||||
}),
|
||||
}
|
||||
|
||||
|
||||
class BandlabIE(BandlabBaseIE):
|
||||
_VALID_URL = [
|
||||
r'https?://(?:www\.)?bandlab.com/(?P<url_type>track|post|revision)/(?P<id>[\da-f_-]+)',
|
||||
r'https?://(?:www\.)?bandlab.com/(?P<url_type>embed)/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)',
|
||||
]
|
||||
_EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]']
|
||||
_TESTS = [{
|
||||
'url': 'https://www.bandlab.com/track/04b37e88dba24967b9dac8eb8567ff39_07d7f906fc96ee11b75e000d3a428fff',
|
||||
'md5': '46f7b43367dd268bbcf0bbe466753b2c',
|
||||
'info_dict': {
|
||||
'id': '02d7f906-fc96-ee11-b75e-000d3a428fff',
|
||||
'ext': 'm4a',
|
||||
'uploader_id': 'ender_milze',
|
||||
'track': 'sweet black',
|
||||
'description': 'composed by juanjn3737',
|
||||
'timestamp': 1702171963,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'duration': 54.629999999999995,
|
||||
'title': 'sweet black',
|
||||
'upload_date': '20231210',
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
|
||||
'genres': ['Lofi'],
|
||||
'uploader': 'ender milze',
|
||||
'comment_count': int,
|
||||
'media_type': 'revision',
|
||||
},
|
||||
}, {
|
||||
# Same track as above but post URL
|
||||
'url': 'https://www.bandlab.com/post/07d7f906-fc96-ee11-b75e-000d3a428fff',
|
||||
'md5': '46f7b43367dd268bbcf0bbe466753b2c',
|
||||
'info_dict': {
|
||||
'id': '02d7f906-fc96-ee11-b75e-000d3a428fff',
|
||||
'ext': 'm4a',
|
||||
'uploader_id': 'ender_milze',
|
||||
'track': 'sweet black',
|
||||
'description': 'composed by juanjn3737',
|
||||
'timestamp': 1702171973,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'duration': 54.629999999999995,
|
||||
'title': 'sweet black',
|
||||
'upload_date': '20231210',
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
|
||||
'genres': ['Lofi'],
|
||||
'uploader': 'ender milze',
|
||||
'comment_count': int,
|
||||
'media_type': 'revision',
|
||||
},
|
||||
}, {
|
||||
# SharedKey Example
|
||||
'url': 'https://www.bandlab.com/track/048916c2-c6da-ee11-85f9-6045bd2e11f9?sharedKey=0NNWX8qYAEmI38lWAzCNDA',
|
||||
'md5': '15174b57c44440e2a2008be9cae00250',
|
||||
'info_dict': {
|
||||
'id': '038916c2-c6da-ee11-85f9-6045bd2e11f9',
|
||||
'ext': 'm4a',
|
||||
'comment_count': int,
|
||||
'genres': ['Other'],
|
||||
'uploader_id': 'user8353034818103753',
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/51b18363-da23-4b9b-a29c-2933a3e561ca/',
|
||||
'timestamp': 1709625771,
|
||||
'track': 'PodcastMaerchen4b',
|
||||
'duration': 468.14,
|
||||
'view_count': int,
|
||||
'description': 'Podcast: Neues aus der Märchenwelt',
|
||||
'like_count': int,
|
||||
'upload_date': '20240305',
|
||||
'uploader': 'Erna Wageneder',
|
||||
'title': 'PodcastMaerchen4b',
|
||||
'media_type': 'revision',
|
||||
},
|
||||
}, {
|
||||
# Different Revision selected
|
||||
'url': 'https://www.bandlab.com/track/130343fc-148b-ea11-96d2-0003ffd1fc09?revId=110343fc-148b-ea11-96d2-0003ffd1fc09',
|
||||
'md5': '74e055ef9325d63f37088772fbfe4454',
|
||||
'info_dict': {
|
||||
'id': '110343fc-148b-ea11-96d2-0003ffd1fc09',
|
||||
'ext': 'm4a',
|
||||
'timestamp': 1588273294,
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/b612e533-e4f7-4542-9f50-3fcfd8dd822c/',
|
||||
'description': 'Final Revision.',
|
||||
'title': 'Replay ( Instrumental)',
|
||||
'uploader': 'David R Sparks',
|
||||
'uploader_id': 'davesnothome69',
|
||||
'view_count': int,
|
||||
'comment_count': int,
|
||||
'track': 'Replay ( Instrumental)',
|
||||
'genres': ['Rock'],
|
||||
'upload_date': '20200430',
|
||||
'like_count': int,
|
||||
'duration': 279.43,
|
||||
'media_type': 'revision',
|
||||
},
|
||||
}, {
|
||||
# Video
|
||||
'url': 'https://www.bandlab.com/post/5cdf9036-3857-ef11-991a-6045bd36e0d9',
|
||||
'md5': '8caa2ef28e86c1dacf167293cfdbeba9',
|
||||
'info_dict': {
|
||||
'id': '5cdf9036-3857-ef11-991a-6045bd36e0d9',
|
||||
'ext': 'mp4',
|
||||
'duration': 44.705,
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/videos/67c6cef1-cef6-40d3-831e-a55bc1dcb972/',
|
||||
'comment_count': int,
|
||||
'title': 'backing vocals',
|
||||
'uploader_id': 'marliashya',
|
||||
'uploader': 'auraa',
|
||||
'like_count': int,
|
||||
'description': 'backing vocals',
|
||||
'media_type': 'video',
|
||||
},
|
||||
}, {
|
||||
# Embed Example
|
||||
'url': 'https://www.bandlab.com/embed/?blur=false&id=014de0a4-7d82-ea11-a94c-0003ffd19c0f',
|
||||
'md5': 'a4ad05cb68c54faaed9b0a8453a8cf4a',
|
||||
'info_dict': {
|
||||
'id': '014de0a4-7d82-ea11-a94c-0003ffd19c0f',
|
||||
'ext': 'm4a',
|
||||
'comment_count': int,
|
||||
'genres': ['Electronic'],
|
||||
'uploader': 'Charlie Henson',
|
||||
'timestamp': 1587328674,
|
||||
'upload_date': '20200419',
|
||||
'view_count': int,
|
||||
'track': 'Positronic Meltdown',
|
||||
'duration': 318.55,
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/87165bc3-5439-496e-b1f7-a9f13b541ff2/',
|
||||
'description': 'Checkout my tracks at AOMX http://aomxsounds.com/',
|
||||
'uploader_id': 'microfreaks',
|
||||
'title': 'Positronic Meltdown',
|
||||
'like_count': int,
|
||||
'media_type': 'revision',
|
||||
},
|
||||
}, {
|
||||
# Track without revisions available
|
||||
'url': 'https://www.bandlab.com/track/55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5',
|
||||
'md5': 'f05d68a3769952c2d9257c473e14c15f',
|
||||
'info_dict': {
|
||||
'id': '55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5',
|
||||
'ext': 'm4a',
|
||||
'track': 'insame',
|
||||
'like_count': int,
|
||||
'duration': 84.03,
|
||||
'title': 'insame',
|
||||
'view_count': int,
|
||||
'comment_count': int,
|
||||
'uploader': 'Sorakime',
|
||||
'uploader_id': 'sorakime',
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/572a351a-0f3a-4c6a-ac39-1a5defdeeb1c/',
|
||||
'timestamp': 1691162128,
|
||||
'upload_date': '20230804',
|
||||
'media_type': 'track',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.bandlab.com/revision/014de0a4-7d82-ea11-a94c-0003ffd19c0f',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_WEBPAGE_TESTS = [{
|
||||
'url': 'https://phantomluigi.github.io/',
|
||||
'info_dict': {
|
||||
'id': 'e14223c3-7871-ef11-bdfd-000d3a980db3',
|
||||
'ext': 'm4a',
|
||||
'view_count': int,
|
||||
'upload_date': '20240913',
|
||||
'uploader_id': 'phantommusicofficial',
|
||||
'timestamp': 1726194897,
|
||||
'uploader': 'Phantom',
|
||||
'comment_count': int,
|
||||
'genres': ['Progresive Rock'],
|
||||
'description': 'md5:a38cd668f7a2843295ef284114f18429',
|
||||
'duration': 225.23,
|
||||
'like_count': int,
|
||||
'title': 'Vermilion Pt. 2 (Cover)',
|
||||
'track': 'Vermilion Pt. 2 (Cover)',
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/62b10750-7aef-4f42-ad08-1af52f577e97/',
|
||||
'media_type': 'revision',
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id, url_type = self._match_valid_url(url).group('id', 'url_type')
|
||||
|
||||
qs = parse_qs(url)
|
||||
revision_id = traverse_obj(qs, (('revId', 'id'), 0, any))
|
||||
if url_type == 'revision':
|
||||
revision_id = display_id
|
||||
|
||||
revision_data = None
|
||||
if not revision_id:
|
||||
post_data = self._call_api(
|
||||
'posts', display_id, note='Downloading post data',
|
||||
query=traverse_obj(qs, {'sharedKey': ('sharedKey', 0)}))
|
||||
|
||||
revision_id = traverse_obj(post_data, (('revisionId', ('revision', 'id')), {str}, any))
|
||||
revision_data = traverse_obj(post_data, ('revision', {dict}))
|
||||
|
||||
if not revision_data and not revision_id:
|
||||
post_type = post_data.get('type')
|
||||
if post_type == 'Video':
|
||||
return self._parse_video(post_data, url=url)
|
||||
if post_type == 'Track':
|
||||
return self._parse_track(post_data, url=url)
|
||||
raise ExtractorError(f'Could not extract data for post type {post_type!r}')
|
||||
|
||||
if not revision_data:
|
||||
revision_data = self._call_api(
|
||||
'revisions', revision_id, note='Downloading revision data', query={'edit': 'false'})
|
||||
|
||||
return self._parse_revision(revision_data, url=url)
|
||||
|
||||
|
||||
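Editor's aside (not part of the commit): a standalone sketch of the request that _call_api wraps, assuming the posts endpoint answers anonymously as the extractor expects. Endpoint and headers are taken from the code above; the post id is the one from the second test:

    import json
    import urllib.request

    req = urllib.request.Request(
        'https://www.bandlab.com/api/v1.3/posts/07d7f906-fc96-ee11-b75e-000d3a428fff',
        headers={
            'accept': 'application/json',
            'referer': 'https://www.bandlab.com/',
            'x-client-id': 'BandLab-Web',
            'x-client-version': '10.1.124',
        })
    # post = json.load(urllib.request.urlopen(req))
    # post['revision']['mixdown']['file'] is the audio URL that _parse_revision maps to 'url'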
class BandlabPlaylistIE(BandlabBaseIE):
|
||||
_VALID_URL = [
|
||||
r'https?://(?:www\.)?bandlab.com/(?:[\w]+/)?(?P<type>albums|collections)/(?P<id>[\da-f-]+)',
|
||||
r'https?://(?:www\.)?bandlab.com/(?P<type>embed)/collection/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)',
|
||||
]
|
||||
_EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]']
|
||||
_TESTS = [{
|
||||
'url': 'https://www.bandlab.com/davesnothome69/albums/89b79ea6-de42-ed11-b495-00224845aac7',
|
||||
'info_dict': {
|
||||
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/69507ff3-579a-45be-afca-9e87eddec944/',
|
||||
'release_date': '20221003',
|
||||
'title': 'Remnants',
|
||||
'album': 'Remnants',
|
||||
'like_count': int,
|
||||
'album_type': 'LP',
|
||||
'description': 'A collection of some feel good, rock hits.',
|
||||
'comment_count': int,
|
||||
'view_count': int,
|
||||
'id': '89b79ea6-de42-ed11-b495-00224845aac7',
|
||||
'uploader': 'David R Sparks',
|
||||
'uploader_id': 'davesnothome69',
|
||||
},
|
||||
'playlist_count': 10,
|
||||
}, {
|
||||
'url': 'https://www.bandlab.com/slytheband/collections/955102d4-1040-ef11-86c3-000d3a42581b',
|
||||
'info_dict': {
|
||||
'id': '955102d4-1040-ef11-86c3-000d3a42581b',
|
||||
'timestamp': 1720762659,
|
||||
'view_count': int,
|
||||
'title': 'My Shit 🖤',
|
||||
'uploader_id': 'slytheband',
|
||||
'uploader': '𝓢𝓛𝓨',
|
||||
'upload_date': '20240712',
|
||||
'like_count': int,
|
||||
'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/collections/2c64ca12-b180-4b76-8587-7a8da76bddc8/',
|
||||
},
|
||||
'playlist_count': 15,
|
||||
}, {
|
||||
# Embeds can contain both albums and collections with the same URL pattern. This is an album
|
||||
'url': 'https://www.bandlab.com/embed/collection/?id=12cc6f7f-951b-ee11-907c-00224844f303',
|
||||
'info_dict': {
|
||||
'id': '12cc6f7f-951b-ee11-907c-00224844f303',
|
||||
'release_date': '20230706',
|
||||
'description': 'This is a collection of songs I created when I had an Amiga computer.',
|
||||
'view_count': int,
|
||||
'title': 'Mark Salud The Amiga Collection',
|
||||
'uploader_id': 'mssirmooth1962',
|
||||
'comment_count': int,
|
||||
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/d618bd7b-0537-40d5-bdd8-61b066e77d59/',
|
||||
'like_count': int,
|
||||
'uploader': 'Mark Salud',
|
||||
'album': 'Mark Salud The Amiga Collection',
|
||||
'album_type': 'LP',
|
||||
},
|
||||
'playlist_count': 24,
|
||||
}, {
|
||||
# Tracks without revision id
|
||||
'url': 'https://www.bandlab.com/embed/collection/?id=e98aafb5-d932-ee11-b8f0-00224844c719',
|
||||
'info_dict': {
|
||||
'like_count': int,
|
||||
'uploader_id': 'sorakime',
|
||||
'comment_count': int,
|
||||
'uploader': 'Sorakime',
|
||||
'view_count': int,
|
||||
'description': 'md5:4ec31c568a5f5a5a2b17572ea64c3825',
|
||||
'release_date': '20230812',
|
||||
'title': 'Art',
|
||||
'album': 'Art',
|
||||
'album_type': 'Album',
|
||||
'id': 'e98aafb5-d932-ee11-b8f0-00224844c719',
|
||||
'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/20c890de-e94a-4422-828a-2da6377a13c8/',
|
||||
},
|
||||
'playlist_count': 13,
|
||||
}, {
|
||||
'url': 'https://www.bandlab.com/albums/89b79ea6-de42-ed11-b495-00224845aac7',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _entries(self, album_data):
|
||||
for post in traverse_obj(album_data, ('posts', lambda _, v: v['type'])):
|
||||
post_type = post['type']
|
||||
if post_type == 'Revision':
|
||||
yield self._parse_revision(post.get('revision'))
|
||||
elif post_type == 'Track':
|
||||
yield self._parse_track(post)
|
||||
elif post_type == 'Video':
|
||||
yield self._parse_video(post)
|
||||
else:
|
||||
self.report_warning(f'Skipping unknown post type: "{post_type}"')
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type')
|
||||
|
||||
endpoints = {
|
||||
'albums': ['albums'],
|
||||
'collections': ['collections'],
|
||||
'embed': ['collections', 'albums'],
|
||||
}.get(playlist_type)
|
||||
for endpoint in endpoints:
|
||||
playlist_data = self._call_api(
|
||||
endpoint, playlist_id, note=f'Downloading {endpoint[:-1]} data',
|
||||
fatal=False, expected_status=404)
|
||||
if not playlist_data.get('errorCode'):
|
||||
playlist_type = endpoint
|
||||
break
|
||||
if error_code := playlist_data.get('errorCode'):
|
||||
raise ExtractorError(f'Could not find playlist data. Error code: "{error_code}"')
|
||||
|
||||
return self.playlist_result(
|
||||
self._entries(playlist_data), playlist_id,
|
||||
**traverse_obj(playlist_data, {
|
||||
'title': ('name', {str}),
|
||||
'description': ('description', {str}),
|
||||
'uploader': ('creator', 'name', {str}),
|
||||
'uploader_id': ('creator', 'username', {str}),
|
||||
'timestamp': ('createdOn', {parse_iso8601}),
|
||||
'release_date': ('releaseDate', {lambda x: x.replace('-', '')}, filter),
|
||||
'thumbnail': ('picture', ('original', 'url'), {url_or_none}, any),
|
||||
'like_count': ('counters', 'likes', {int_or_none}),
|
||||
'comment_count': ('counters', 'comments', {int_or_none}),
|
||||
'view_count': ('counters', 'plays', {int_or_none}),
|
||||
}),
|
||||
**(traverse_obj(playlist_data, {
|
||||
'album': ('name', {str}),
|
||||
'album_type': ('type', {str}),
|
||||
}) if playlist_type == 'albums' else {}))
|
||||
@@ -18,7 +18,6 @@ from ..utils import (
|
||||
InAdvancePagedList,
|
||||
OnDemandPagedList,
|
||||
bool_or_none,
|
||||
clean_html,
|
||||
determine_ext,
|
||||
filter_dict,
|
||||
float_or_none,
|
||||
@@ -63,7 +62,7 @@ class BilibiliBaseIE(InfoExtractor):
|
||||
'support_formats', lambda _, v: v['quality'] not in parsed_qualities))], delim=', ')
|
||||
if missing_formats:
|
||||
self.to_screen(
|
||||
f'Format(s) {missing_formats} are missing; you have to login or '
|
||||
f'Format(s) {missing_formats} are missing; you have to '
|
||||
f'become a premium member to download them. {self._login_hint()}')
|
||||
|
||||
def extract_formats(self, play_info):
|
||||
@@ -165,14 +164,18 @@ class BilibiliBaseIE(InfoExtractor):
|
||||
params['w_rid'] = hashlib.md5(f'{query}{self._get_wbi_key(video_id)}'.encode()).hexdigest()
|
||||
return params
|
||||
|
||||
def _download_playinfo(self, bvid, cid, headers=None, qn=None):
|
||||
params = {'bvid': bvid, 'cid': cid, 'fnval': 4048}
|
||||
if qn:
|
||||
params['qn'] = qn
|
||||
def _download_playinfo(self, bvid, cid, headers=None, query=None):
|
||||
params = {'bvid': bvid, 'cid': cid, 'fnval': 4048, **(query or {})}
|
||||
if self.is_logged_in:
|
||||
params.pop('try_look', None)
|
||||
if qn := params.get('qn'):
|
||||
note = f'Downloading video format {qn} for cid {cid}'
|
||||
else:
|
||||
note = f'Downloading video formats for cid {cid}'
|
||||
|
||||
return self._download_json(
|
||||
'https://api.bilibili.com/x/player/wbi/playurl', bvid,
|
||||
query=self._sign_wbi(params, bvid), headers=headers,
|
||||
note=f'Downloading video formats for cid {cid} {qn or ""}')['data']
|
||||
query=self._sign_wbi(params, bvid), headers=headers, note=note)['data']
|
||||
|
||||
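Editor's aside (not part of the commit): a standalone paraphrase of the new parameter handling in _download_playinfo. The function name and values are placeholders; is_logged_in stands in for the extractor property:

    def build_playinfo_params(bvid, cid, query=None, is_logged_in=False):
        params = {'bvid': bvid, 'cid': cid, 'fnval': 4048, **(query or {})}
        if is_logged_in:
            params.pop('try_look', None)  # mirrors the diff: try_look is dropped for logged-in sessions
        return params

    assert 'try_look' not in build_playinfo_params('BV1xx411c7md', 1, {'try_look': 1}, is_logged_in=True)
    assert build_playinfo_params('BV1xx411c7md', 1, {'qn': 80})['qn'] == 80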
def json2srt(self, json_data):
|
||||
srt_data = ''
|
||||
@@ -191,7 +194,7 @@ class BilibiliBaseIE(InfoExtractor):
|
||||
}
|
||||
|
||||
video_info = self._download_json(
|
||||
'https://api.bilibili.com/x/player/v2', video_id,
|
||||
'https://api.bilibili.com/x/player/wbi/v2', video_id,
|
||||
query={'aid': aid, 'cid': cid} if aid else {'bvid': video_id, 'cid': cid},
|
||||
note=f'Extracting subtitle info {cid}', headers=self._HEADERS)
|
||||
if traverse_obj(video_info, ('data', 'need_login_subtitle')):
|
||||
@@ -207,7 +210,7 @@ class BilibiliBaseIE(InfoExtractor):
|
||||
|
||||
def _get_chapters(self, aid, cid):
|
||||
chapters = aid and cid and self._download_json(
|
||||
'https://api.bilibili.com/x/player/v2', aid, query={'aid': aid, 'cid': cid},
|
||||
'https://api.bilibili.com/x/player/wbi/v2', aid, query={'aid': aid, 'cid': cid},
|
||||
note='Extracting chapters', fatal=False, headers=self._HEADERS)
|
||||
return traverse_obj(chapters, ('data', 'view_points', ..., {
|
||||
'title': 'content',
|
||||
@@ -286,7 +289,7 @@ class BilibiliBaseIE(InfoExtractor):
|
||||
('data', 'interaction', 'graph_version', {int_or_none}))
|
||||
cid_edges = self._get_divisions(video_id, graph_version, {1: {'cid': cid}}, 1)
|
||||
for cid, edges in cid_edges.items():
|
||||
play_info = self._download_playinfo(video_id, cid, headers=headers)
|
||||
play_info = self._download_playinfo(video_id, cid, headers=headers, query={'try_look': 1})
|
||||
yield {
|
||||
**metainfo,
|
||||
'id': f'{video_id}_{cid}',
|
||||
@@ -639,40 +642,29 @@ class BiliBiliIE(BilibiliBaseIE):
|
||||
headers['Referer'] = url
|
||||
|
||||
initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id)
|
||||
|
||||
if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
|
||||
self.raise_login_required()
|
||||
if traverse_obj(initial_state, ('error', 'trueCode')) == -404:
|
||||
raise ExtractorError(
|
||||
'This video may be deleted or geo-restricted. '
|
||||
'You might want to try a VPN or a proxy server (with --proxy)', expected=True)
|
||||
|
||||
is_festival = 'videoData' not in initial_state
|
||||
if is_festival:
|
||||
video_data = initial_state['videoInfo']
|
||||
else:
|
||||
play_info_obj = self._search_json(
|
||||
r'window\.__playinfo__\s*=', webpage, 'play info', video_id, fatal=False)
|
||||
if not play_info_obj:
|
||||
if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
|
||||
self.raise_login_required()
|
||||
if traverse_obj(initial_state, ('error', 'trueCode')) == -404:
|
||||
raise ExtractorError(
|
||||
'This video may be deleted or geo-restricted. '
|
||||
'You might want to try a VPN or a proxy server (with --proxy)', expected=True)
|
||||
play_info = traverse_obj(play_info_obj, ('data', {dict}))
|
||||
if not play_info:
|
||||
if traverse_obj(play_info_obj, 'code') == 87007:
|
||||
toast = get_element_by_class('tips-toast', webpage) or ''
|
||||
msg = clean_html(
|
||||
f'{get_element_by_class("belongs-to", toast) or ""},'
|
||||
+ (get_element_by_class('level', toast) or ''))
|
||||
raise ExtractorError(
|
||||
f'This is a supporter-only video: {msg}. {self._login_hint()}', expected=True)
|
||||
raise ExtractorError('Failed to extract play info')
|
||||
video_data = initial_state['videoData']
|
||||
|
||||
video_id, title = video_data['bvid'], video_data.get('title')
|
||||
|
||||
# Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
|
||||
page_list_json = not is_festival and traverse_obj(
|
||||
page_list_json = (not is_festival and traverse_obj(
|
||||
self._download_json(
|
||||
'https://api.bilibili.com/x/player/pagelist', video_id,
|
||||
fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'},
|
||||
note='Extracting videos in anthology', headers=headers),
|
||||
'data', expected_type=list) or []
|
||||
'data', expected_type=list)) or []
|
||||
is_anthology = len(page_list_json) > 1
|
||||
|
||||
part_id = int_or_none(parse_qs(url).get('p', [None])[-1])
|
||||
@@ -691,8 +683,6 @@ class BiliBiliIE(BilibiliBaseIE):
|
||||
|
||||
festival_info = {}
|
||||
if is_festival:
|
||||
play_info = self._download_playinfo(video_id, cid, headers=headers)
|
||||
|
||||
festival_info = traverse_obj(initial_state, {
|
||||
'uploader': ('videoInfo', 'upName'),
|
||||
'uploader_id': ('videoInfo', 'upMid', {str_or_none}),
|
||||
@@ -727,62 +717,79 @@ class BiliBiliIE(BilibiliBaseIE):
|
||||
self._get_interactive_entries(video_id, cid, metainfo, headers=headers), **metainfo,
|
||||
duration=traverse_obj(initial_state, ('videoData', 'duration', {int_or_none})),
|
||||
__post_extractor=self.extract_comments(aid))
|
||||
else:
|
||||
formats = self.extract_formats(play_info)
|
||||
|
||||
if not traverse_obj(play_info, ('dash')):
|
||||
# we only have legacy formats and need additional work
|
||||
has_qn = lambda x: x in traverse_obj(formats, (..., 'quality'))
|
||||
for qn in traverse_obj(play_info, ('accept_quality', lambda _, v: not has_qn(v), {int})):
|
||||
formats.extend(traverse_obj(
|
||||
self.extract_formats(self._download_playinfo(video_id, cid, headers=headers, qn=qn)),
|
||||
lambda _, v: not has_qn(v['quality'])))
|
||||
self._check_missing_formats(play_info, formats)
|
||||
flv_formats = traverse_obj(formats, lambda _, v: v['fragments'])
|
||||
if flv_formats and len(flv_formats) < len(formats):
|
||||
# Flv and mp4 are incompatible due to `multi_video` workaround, so drop one
|
||||
if not self._configuration_arg('prefer_multi_flv'):
|
||||
dropped_fmts = ', '.join(
|
||||
f'{f.get("format_note")} ({f.get("format_id")})' for f in flv_formats)
|
||||
formats = traverse_obj(formats, lambda _, v: not v.get('fragments'))
|
||||
if dropped_fmts:
|
||||
self.to_screen(
|
||||
f'Dropping incompatible flv format(s) {dropped_fmts} since mp4 is available. '
|
||||
'To extract flv, pass --extractor-args "bilibili:prefer_multi_flv"')
|
||||
else:
|
||||
formats = traverse_obj(
|
||||
# XXX: Filtering by extractor-arg is for testing purposes
|
||||
formats, lambda _, v: v['quality'] == int(self._configuration_arg('prefer_multi_flv')[0]),
|
||||
) or [max(flv_formats, key=lambda x: x['quality'])]
|
||||
play_info = None
|
||||
if self.is_logged_in:
|
||||
play_info = traverse_obj(
|
||||
self._search_json(r'window\.__playinfo__\s*=', webpage, 'play info', video_id, default=None),
|
||||
('data', {dict}))
|
||||
if not play_info:
|
||||
play_info = self._download_playinfo(video_id, cid, headers=headers, query={'try_look': 1})
|
||||
formats = self.extract_formats(play_info)
|
||||
|
||||
if traverse_obj(formats, (0, 'fragments')):
|
||||
# We have flv formats, which are individual short videos with their own timestamps and metainfo
|
||||
# Binary concatenation corrupts their timestamps, so we need a `multi_video` workaround
|
||||
return {
|
||||
**metainfo,
|
||||
'_type': 'multi_video',
|
||||
'entries': [{
|
||||
'id': f'{metainfo["id"]}_{idx}',
|
||||
'title': metainfo['title'],
|
||||
'http_headers': metainfo['http_headers'],
|
||||
'formats': [{
|
||||
**fragment,
|
||||
'format_id': formats[0].get('format_id'),
|
||||
}],
|
||||
'subtitles': self.extract_subtitles(video_id, cid) if idx == 0 else None,
|
||||
'__post_extractor': self.extract_comments(aid) if idx == 0 else None,
|
||||
} for idx, fragment in enumerate(formats[0]['fragments'])],
|
||||
'duration': float_or_none(play_info.get('timelength'), scale=1000),
|
||||
}
|
||||
else:
|
||||
return {
|
||||
**metainfo,
|
||||
'formats': formats,
|
||||
'duration': float_or_none(play_info.get('timelength'), scale=1000),
|
||||
'chapters': self._get_chapters(aid, cid),
|
||||
'subtitles': self.extract_subtitles(video_id, cid),
|
||||
'__post_extractor': self.extract_comments(aid),
|
||||
}
|
||||
if video_data.get('is_upower_exclusive'):
|
||||
high_level = traverse_obj(initial_state, ('elecFullInfo', 'show_info', 'high_level', {dict})) or {}
|
||||
msg = f'{join_nonempty("title", "sub_title", from_dict=high_level, delim=",")}. {self._login_hint()}'
|
||||
if not formats:
|
||||
raise ExtractorError(f'This is a supporter-only video: {msg}', expected=True)
|
||||
if '试看' in traverse_obj(play_info, ('accept_description', ..., {str})):
|
||||
self.report_warning(
|
||||
f'This is a supporter-only video, only the preview will be extracted: {msg}',
|
||||
video_id=video_id)
|
||||
|
||||
if not traverse_obj(play_info, 'dash'):
|
||||
# we only have legacy formats and need additional work
|
||||
has_qn = lambda x: x in traverse_obj(formats, (..., 'quality'))
|
||||
for qn in traverse_obj(play_info, ('accept_quality', lambda _, v: not has_qn(v), {int})):
|
||||
formats.extend(traverse_obj(
|
||||
self.extract_formats(self._download_playinfo(video_id, cid, headers=headers, query={'qn': qn})),
|
||||
lambda _, v: not has_qn(v['quality'])))
|
||||
self._check_missing_formats(play_info, formats)
|
||||
flv_formats = traverse_obj(formats, lambda _, v: v['fragments'])
|
||||
if flv_formats and len(flv_formats) < len(formats):
|
||||
# Flv and mp4 are incompatible due to `multi_video` workaround, so drop one
|
||||
if not self._configuration_arg('prefer_multi_flv'):
|
||||
dropped_fmts = ', '.join(
|
||||
f'{f.get("format_note")} ({f.get("format_id")})' for f in flv_formats)
|
||||
formats = traverse_obj(formats, lambda _, v: not v.get('fragments'))
|
||||
if dropped_fmts:
|
||||
self.to_screen(
|
||||
f'Dropping incompatible flv format(s) {dropped_fmts} since mp4 is available. '
|
||||
'To extract flv, pass --extractor-args "bilibili:prefer_multi_flv"')
|
||||
else:
|
||||
formats = traverse_obj(
|
||||
# XXX: Filtering by extractor-arg is for testing purposes
|
||||
formats, lambda _, v: v['quality'] == int(self._configuration_arg('prefer_multi_flv')[0]),
|
||||
) or [max(flv_formats, key=lambda x: x['quality'])]
|
||||
|
||||
if traverse_obj(formats, (0, 'fragments')):
|
||||
# We have flv formats, which are individual short videos with their own timestamps and metainfo
|
||||
# Binary concatenation corrupts their timestamps, so we need a `multi_video` workaround
|
||||
return {
|
||||
**metainfo,
|
||||
'_type': 'multi_video',
|
||||
'entries': [{
|
||||
'id': f'{metainfo["id"]}_{idx}',
|
||||
'title': metainfo['title'],
|
||||
'http_headers': metainfo['http_headers'],
|
||||
'formats': [{
|
||||
**fragment,
|
||||
'format_id': formats[0].get('format_id'),
|
||||
}],
|
||||
'subtitles': self.extract_subtitles(video_id, cid) if idx == 0 else None,
|
||||
'__post_extractor': self.extract_comments(aid) if idx == 0 else None,
|
||||
} for idx, fragment in enumerate(formats[0]['fragments'])],
|
||||
'duration': float_or_none(play_info.get('timelength'), scale=1000),
|
||||
}
|
||||
|
||||
return {
|
||||
**metainfo,
|
||||
'formats': formats,
|
||||
'duration': float_or_none(play_info.get('timelength'), scale=1000),
|
||||
'chapters': self._get_chapters(aid, cid),
|
||||
'subtitles': self.extract_subtitles(video_id, cid),
|
||||
'__post_extractor': self.extract_comments(aid),
|
||||
}
|
||||
|
||||
|
||||
class BiliBiliBangumiIE(BilibiliBaseIE):
|
||||
@@ -860,10 +867,16 @@ class BiliBiliBangumiIE(BilibiliBaseIE):
|
||||
self.raise_login_required('This video is for premium members only')
|
||||
|
||||
headers['Referer'] = url
|
||||
play_info = self._download_json(
|
||||
'https://api.bilibili.com/pgc/player/web/v2/playurl', episode_id,
|
||||
'Extracting episode', query={'fnval': '4048', 'ep_id': episode_id},
|
||||
headers=headers)
|
||||
|
||||
play_info = (
|
||||
self._search_json(
|
||||
r'playurlSSRData\s*=', webpage, 'embedded page info', episode_id,
|
||||
end_pattern='\n', default=None)
|
||||
or self._download_json(
|
||||
'https://api.bilibili.com/pgc/player/web/v2/playurl', episode_id,
|
||||
'Extracting episode', query={'fnval': 12240, 'ep_id': episode_id},
|
||||
headers=headers))
|
||||
|
||||
premium_only = play_info.get('code') == -10403
|
||||
play_info = traverse_obj(play_info, ('result', 'video_info', {dict})) or {}
|
||||
|
||||
|
||||
@@ -5,11 +5,12 @@ from ..utils import (
|
||||
ExtractorError,
|
||||
lowercase_escape,
|
||||
url_or_none,
|
||||
urlencode_postdata,
|
||||
)
|
||||
|
||||
|
||||
class ChaturbateIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?:fullvideo/?\?.*?\bb=)?(?P<id>[^/?&#]+)'
|
||||
_VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.(?P<tld>com|eu|global)/(?:fullvideo/?\?.*?\bb=)?(?P<id>[^/?&#]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.chaturbate.com/siswet19/',
|
||||
'info_dict': {
|
||||
@@ -29,16 +30,58 @@ class ChaturbateIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://en.chaturbate.com/siswet19/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://chaturbate.eu/siswet19/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://chaturbate.eu/fullvideo/?b=caylin',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://chaturbate.global/siswet19/',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_ROOM_OFFLINE = 'Room is currently offline'
|
||||
_ERROR_MAP = {
|
||||
'offline': 'Room is currently offline',
|
||||
'private': 'Room is currently in a private show',
|
||||
'away': 'Performer is currently away',
|
||||
'password protected': 'Room is password protected',
|
||||
'hidden': 'Hidden session in progress',
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
def _extract_from_api(self, video_id, tld):
|
||||
response = self._download_json(
|
||||
f'https://chaturbate.{tld}/get_edge_hls_url_ajax/', video_id,
|
||||
data=urlencode_postdata({'room_slug': video_id}),
|
||||
headers={
|
||||
**self.geo_verification_headers(),
|
||||
'X-Requested-With': 'XMLHttpRequest',
|
||||
'Accept': 'application/json',
|
||||
}, fatal=False, impersonate=True) or {}
|
||||
|
||||
m3u8_url = response.get('url')
|
||||
if not m3u8_url:
|
||||
status = response.get('room_status')
|
||||
if error := self._ERROR_MAP.get(status):
|
||||
raise ExtractorError(error, expected=True)
|
||||
if status == 'public':
|
||||
self.raise_geo_restricted()
|
||||
self.report_warning(f'Got status "{status}" from API; falling back to webpage extraction')
|
||||
return None
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_id,
|
||||
'thumbnail': f'https://roomimg.stream.highwebmedia.com/ri/{video_id}.jpg',
|
||||
'is_live': True,
|
||||
'age_limit': 18,
|
||||
'formats': self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True),
|
||||
}
|
||||
|
||||
def _extract_from_html(self, video_id, tld):
|
||||
webpage = self._download_webpage(
|
||||
f'https://chaturbate.com/{video_id}/', video_id,
|
||||
headers=self.geo_verification_headers())
|
||||
f'https://chaturbate.{tld}/{video_id}/', video_id,
|
||||
headers=self.geo_verification_headers(), impersonate=True)
|
||||
|
||||
found_m3u8_urls = []
|
||||
|
||||
@@ -76,8 +119,8 @@ class ChaturbateIE(InfoExtractor):
|
||||
webpage, 'error', group='error', default=None)
|
||||
if not error:
|
||||
if any(p in webpage for p in (
|
||||
self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')):
|
||||
error = self._ROOM_OFFLINE
|
||||
self._ERROR_MAP['offline'], 'offline_tipping', 'tip_offline')):
|
||||
error = self._ERROR_MAP['offline']
|
||||
if error:
|
||||
raise ExtractorError(error, expected=True)
|
||||
raise ExtractorError('Unable to find stream URL')
|
||||
@@ -104,3 +147,7 @@ class ChaturbateIE(InfoExtractor):
|
||||
'is_live': True,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id, tld = self._match_valid_url(url).group('id', 'tld')
|
||||
return self._extract_from_api(video_id, tld) or self._extract_from_html(video_id, tld)
|
||||
|
||||
@@ -8,7 +8,7 @@ class CloudflareStreamIE(InfoExtractor):
|
||||
_DOMAIN_RE = r'(?:cloudflarestream\.com|(?:videodelivery|bytehighway)\.net)'
|
||||
_EMBED_RE = rf'(?:embed\.|{_SUBDOMAIN_RE}){_DOMAIN_RE}/embed/[^/?#]+\.js\?(?:[^#]+&)?video='
|
||||
_ID_RE = r'[\da-f]{32}|eyJ[\w-]+\.[\w-]+\.[\w-]+'
|
||||
_VALID_URL = rf'https?://(?:{_SUBDOMAIN_RE}{_DOMAIN_RE}/|{_EMBED_RE})(?P<id>{_ID_RE})'
|
||||
_VALID_URL = rf'https?://(?:{_SUBDOMAIN_RE}(?P<domain>{_DOMAIN_RE})/|{_EMBED_RE})(?P<id>{_ID_RE})'
|
||||
_EMBED_REGEX = [
|
||||
rf'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//{_EMBED_RE}(?:{_ID_RE})(?:(?!\1).)*)\1',
|
||||
rf'<iframe[^>]+\bsrc=["\'](?P<url>https?://{_SUBDOMAIN_RE}{_DOMAIN_RE}/[\da-f]{{32}})',
|
||||
@@ -19,7 +19,7 @@ class CloudflareStreamIE(InfoExtractor):
|
||||
'id': '31c9291ab41fac05471db4e73aa11717',
|
||||
'ext': 'mp4',
|
||||
'title': '31c9291ab41fac05471db4e73aa11717',
|
||||
'thumbnail': 'https://videodelivery.net/31c9291ab41fac05471db4e73aa11717/thumbnails/thumbnail.jpg',
|
||||
'thumbnail': 'https://cloudflarestream.com/31c9291ab41fac05471db4e73aa11717/thumbnails/thumbnail.jpg',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
@@ -30,7 +30,7 @@ class CloudflareStreamIE(InfoExtractor):
|
||||
'id': '0e8e040aec776862e1d632a699edf59e',
|
||||
'ext': 'mp4',
|
||||
'title': '0e8e040aec776862e1d632a699edf59e',
|
||||
'thumbnail': 'https://videodelivery.net/0e8e040aec776862e1d632a699edf59e/thumbnails/thumbnail.jpg',
|
||||
'thumbnail': 'https://cloudflarestream.com/0e8e040aec776862e1d632a699edf59e/thumbnails/thumbnail.jpg',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://watch.cloudflarestream.com/9df17203414fd1db3e3ed74abbe936c1',
|
||||
@@ -54,7 +54,7 @@ class CloudflareStreamIE(InfoExtractor):
|
||||
'id': 'eaef9dea5159cf968be84241b5cedfe7',
|
||||
'ext': 'mp4',
|
||||
'title': 'eaef9dea5159cf968be84241b5cedfe7',
|
||||
'thumbnail': 'https://videodelivery.net/eaef9dea5159cf968be84241b5cedfe7/thumbnails/thumbnail.jpg',
|
||||
'thumbnail': 'https://cloudflarestream.com/eaef9dea5159cf968be84241b5cedfe7/thumbnails/thumbnail.jpg',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
@@ -62,8 +62,9 @@ class CloudflareStreamIE(InfoExtractor):
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
domain = 'bytehighway.net' if 'bytehighway.net/' in url else 'videodelivery.net'
|
||||
video_id, domain = self._match_valid_url(url).group('id', 'domain')
|
||||
if domain != 'bytehighway.net':
|
||||
domain = 'cloudflarestream.com'
|
||||
base_url = f'https://{domain}/{video_id}/'
|
||||
if '.' in video_id:
|
||||
video_id = self._parse_json(base64.urlsafe_b64decode(
|
||||
|
||||
@@ -25,7 +25,6 @@ import xml.etree.ElementTree
|
||||
from ..compat import (
|
||||
compat_etree_fromstring,
|
||||
compat_expanduser,
|
||||
compat_os_name,
|
||||
urllib_req_to_req,
|
||||
)
|
||||
from ..cookies import LenientSimpleCookie
|
||||
@@ -279,6 +278,7 @@ class InfoExtractor:
|
||||
thumbnails: A list of dictionaries, with the following entries:
|
||||
* "id" (optional, string) - Thumbnail format ID
|
||||
* "url"
|
||||
* "ext" (optional, string) - actual image extension if not given in URL
|
||||
* "preference" (optional, int) - quality of the image
|
||||
* "width" (optional, int)
|
||||
* "height" (optional, int)
|
||||
@@ -1028,7 +1028,7 @@ class InfoExtractor:
|
||||
filename = sanitize_filename(f'{basen}.dump', restricted=True)
|
||||
# Working around MAX_PATH limitation on Windows (see
|
||||
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
|
||||
if compat_os_name == 'nt':
|
||||
if os.name == 'nt':
|
||||
absfilepath = os.path.abspath(filename)
|
||||
if len(absfilepath) > 259:
|
||||
filename = fR'\\?\{absfilepath}'
|
||||
@@ -1854,12 +1854,26 @@ class InfoExtractor:

@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
seen_urls = set()
seen_fragment_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
fragments = f.get('fragments')
if callable(fragments):
unique_formats.append(f)

elif fragments:
fragment_urls = frozenset(
fragment.get('url') or urljoin(f['fragment_base_url'], fragment['path'])
for fragment in fragments)
if fragment_urls not in seen_fragment_urls:
seen_fragment_urls.add(fragment_urls)
unique_formats.append(f)

elif f['url'] not in seen_urls:
seen_urls.add(f['url'])
unique_formats.append(f)

formats[:] = unique_formats
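Editor's aside (not part of the commit): a minimal sketch of the rewritten _remove_duplicate_formats — fragmented formats are now deduplicated by their set of fragment URLs, plain formats by URL. The format dicts and URLs are hypothetical:

    from yt_dlp.extractor.common import InfoExtractor

    formats = [
        {'url': 'https://example.com/a.m3u8'},
        {'url': 'https://example.com/a.m3u8'},  # exact URL duplicate -> dropped
        {'url': 'https://example.com/x', 'fragments': [{'url': 'https://example.com/seg1'}]},
        {'url': 'https://example.com/y', 'fragments': [{'url': 'https://example.com/seg1'}]},  # same fragment set -> dropped
    ]
    InfoExtractor._remove_duplicate_formats(formats)
    assert len(formats) == 2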

def _is_valid_url(self, url, video_id, item='video', headers={}):
@@ -3781,7 +3795,7 @@ class InfoExtractor:
""" Merge subtitle dictionaries, language by language. """
if target is None:
target = {}
for d in dicts:
for d in filter(None, dicts):
for lang, subs in d.items():
target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
return target
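Editor's aside (not part of the commit): filter(None, dicts) lets callers pass None for "no subtitles" without the merge crashing. A one-liner sketch with hypothetical subtitle dicts:

    # only the truthy dicts survive, so a None entry no longer raises AttributeError
    assert list(filter(None, [{'en': [{'url': 'https://example.com/en.vtt'}]}, None, {}])) == \
        [{'en': [{'url': 'https://example.com/en.vtt'}]}]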
@@ -3803,7 +3817,7 @@ class InfoExtractor:
def mark_watched(self, *args, **kwargs):
if not self.get_param('mark_watched', False):
return
if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
if (self.supports_login() and self._get_login_info()[0] is not None) or self._cookies_passed:
self._mark_watched(*args, **kwargs)

def _mark_watched(self, *args, **kwargs):
@@ -1,14 +1,27 @@
|
||||
import json
|
||||
import re
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import orderedSet
|
||||
from .ninecninemedia import NineCNineMediaIE
|
||||
from ..utils import extract_attributes, orderedSet
|
||||
from ..utils.traversal import find_element, traverse_obj
|
||||
|
||||
|
||||
class CTVNewsIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)'
|
||||
_BASE_REGEX = r'https?://(?:[^.]+\.)?ctvnews\.ca/'
|
||||
_VIDEO_ID_RE = r'(?P<id>\d{5,})'
|
||||
_PLAYLIST_ID_RE = r'(?P<id>\d\.\d{5,})'
|
||||
_VALID_URL = [
|
||||
rf'{_BASE_REGEX}video/c{_VIDEO_ID_RE}',
|
||||
rf'{_BASE_REGEX}video(?:-gallery)?/?\?clipId={_VIDEO_ID_RE}',
|
||||
rf'{_BASE_REGEX}video/?\?(?:playlist|bin)Id={_PLAYLIST_ID_RE}',
|
||||
rf'{_BASE_REGEX}(?!video/)[^?#]*?{_PLAYLIST_ID_RE}/?(?:$|[?#])',
|
||||
rf'{_BASE_REGEX}(?!video/)[^?#]+\?binId={_PLAYLIST_ID_RE}',
|
||||
]
|
||||
_TESTS = [{
|
||||
'url': 'http://www.ctvnews.ca/video?clipId=901995',
|
||||
'md5': '9b8624ba66351a23e0b6e1391971f9af',
|
||||
'md5': 'b608f466c7fa24b9666c6439d766ab7e',
|
||||
'info_dict': {
|
||||
'id': '901995',
|
||||
'ext': 'flv',
|
||||
@@ -16,6 +29,33 @@ class CTVNewsIE(InfoExtractor):
|
||||
'description': 'md5:958dd3b4f5bbbf0ed4d045c790d89285',
|
||||
'timestamp': 1467286284,
|
||||
'upload_date': '20160630',
|
||||
'categories': [],
|
||||
'season_number': 0,
|
||||
'season': 'Season 0',
|
||||
'tags': [],
|
||||
'series': 'CTV News National | Archive | Stories 2',
|
||||
'season_id': '57981',
|
||||
'thumbnail': r're:https?://.*\.jpg$',
|
||||
'duration': 764.631,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://barrie.ctvnews.ca/video/c3030933-here_s-what_s-making-news-for-nov--15?binId=1272429',
|
||||
'md5': '8b8c2b33c5c1803e3c26bc74ff8694d5',
|
||||
'info_dict': {
|
||||
'id': '3030933',
|
||||
'ext': 'flv',
|
||||
'title': 'Here’s what’s making news for Nov. 15',
|
||||
'description': 'Here are the top stories we’re working on for CTV News at 11 for Nov. 15',
|
||||
'thumbnail': 'http://images2.9c9media.com/image_asset/2021_2_22_a602e68e-1514-410e-a67a-e1f7cccbacab_png_2000x1125.jpg',
|
||||
'season_id': '58104',
|
||||
'season_number': 0,
|
||||
'tags': [],
|
||||
'season': 'Season 0',
'categories': [],
'series': 'CTV News Barrie',
'upload_date': '20241116',
'duration': 42.943,
'timestamp': 1731722452,
},
}, {
'url': 'http://www.ctvnews.ca/video?playlistId=1.2966224',
@@ -31,6 +71,72 @@ class CTVNewsIE(InfoExtractor):
'id': '1.2876780',
},
'playlist_mincount': 100,
}, {
'url': 'https://www.ctvnews.ca/it-s-been-23-years-since-toronto-called-in-the-army-after-a-major-snowstorm-1.5736957',
'info_dict':
{
'id': '1.5736957',
},
'playlist_mincount': 6,
}, {
'url': 'https://www.ctvnews.ca/business/respondents-to-bank-of-canada-questionnaire-largely-oppose-creating-a-digital-loonie-1.6665797',
'md5': '24bc4b88cdc17d8c3fc01dfc228ab72c',
'info_dict': {
'id': '2695026',
'ext': 'flv',
'season_id': '89852',
'series': 'From CTV News Channel',
'description': 'md5:796a985a23cacc7e1e2fafefd94afd0a',
'season': '2023',
'title': 'Bank of Canada asks public about digital currency',
'categories': [],
'tags': [],
'upload_date': '20230526',
'season_number': 2023,
'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
'timestamp': 1685105157,
'duration': 253.553,
},
}, {
'url': 'https://stox.ctvnews.ca/video-gallery?clipId=582589',
'md5': '135cc592df607d29dddc931f1b756ae2',
'info_dict': {
'id': '582589',
'ext': 'flv',
'categories': [],
'timestamp': 1427906183,
'season_number': 0,
'duration': 125.559,
'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
'series': 'CTV News Stox',
'description': 'CTV original footage of the rise and fall of the Berlin Wall.',
'title': 'Berlin Wall',
'season_id': '63817',
'season': 'Season 0',
'tags': [],
'upload_date': '20150401',
},
}, {
'url': 'https://ottawa.ctvnews.ca/features/regional-contact/regional-contact-archive?binId=1.1164587#3023759',
'md5': 'a14c0603557decc6531260791c23cc5e',
'info_dict': {
'id': '3023759',
'ext': 'flv',
'season_number': 2024,
'timestamp': 1731798000,
'season': '2024',
'episode': 'Episode 125',
'description': 'CTV News Ottawa at Six',
'duration': 2712.076,
'episode_number': 125,
'upload_date': '20241116',
'title': 'CTV News Ottawa at Six for Saturday, November 16, 2024',
'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
'categories': [],
'tags': [],
'series': 'CTV News Ottawa at Six',
'season_id': '92667',
},
}, {
'url': 'http://www.ctvnews.ca/1.810401',
'only_matching': True,
@@ -42,29 +148,35 @@ class CTVNewsIE(InfoExtractor):
'only_matching': True,
}]

def _ninecninemedia_url_result(self, clip_id):
return self.url_result(f'9c9media:ctvnews_web:{clip_id}', NineCNineMediaIE, clip_id)

def _real_extract(self, url):
page_id = self._match_id(url)

def ninecninemedia_url_result(clip_id):
return {
'_type': 'url_transparent',
'id': clip_id,
'url': f'9c9media:ctvnews_web:{clip_id}',
'ie_key': 'NineCNineMedia',
}
if mobj := re.fullmatch(self._VIDEO_ID_RE, urllib.parse.urlparse(url).fragment):
page_id = mobj.group('id')

if page_id.isdigit():
return ninecninemedia_url_result(page_id)
else:
webpage = self._download_webpage(f'http://www.ctvnews.ca/{page_id}', page_id, query={
'ot': 'example.AjaxPageLayout.ot',
'maxItemsPerPage': 1000000,
})
entries = [ninecninemedia_url_result(clip_id) for clip_id in orderedSet(
re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
if not entries:
webpage = self._download_webpage(url, page_id)
if 'getAuthStates("' in webpage:
entries = [ninecninemedia_url_result(clip_id) for clip_id in
self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')]
return self.playlist_result(entries, page_id)
if re.fullmatch(self._VIDEO_ID_RE, page_id):
return self._ninecninemedia_url_result(page_id)

webpage = self._download_webpage(f'https://www.ctvnews.ca/{page_id}', page_id, query={
'ot': 'example.AjaxPageLayout.ot',
'maxItemsPerPage': 1000000,
})
entries = [self._ninecninemedia_url_result(clip_id)
for clip_id in orderedSet(re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
if not entries:
webpage = self._download_webpage(url, page_id)
if 'getAuthStates("' in webpage:
entries = [self._ninecninemedia_url_result(clip_id) for clip_id in
self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')]
else:
entries = [
self._ninecninemedia_url_result(clip_id) for clip_id in
traverse_obj(webpage, (
{find_element(tag='jasper-player-container', html=True)},
{extract_attributes}, 'axis-ids', {json.loads}, ..., 'axisId', {str}))
]

return self.playlist_result(entries, page_id)

@@ -1,7 +1,4 @@
import time

from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import int_or_none


@@ -31,9 +28,6 @@ class CultureUnpluggedIE(InfoExtractor):
video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id

# request setClientTimezone.php to get PHPSESSID cookie which is need to get valid json data in the next request
self._request_webpage(HEADRequest(
'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id)
movie_data = self._download_json(
f'http://www.cultureunplugged.com/movie-data/cu-{video_id}.json', display_id)


@@ -1,3 +1,4 @@
import functools
import hashlib
import re
import time
@@ -51,6 +52,15 @@ class DacastVODIE(DacastBaseIE):
'thumbnail': 'https://universe-files.dacast.com/26137208-5858-65c1-5e9a-9d6b6bd2b6c2',
},
'params': {'skip_download': 'm3u8'},
}, { # /uspaes/ in hls_url
'url': 'https://iframe.dacast.com/vod/f9823fc6-faba-b98f-0d00-4a7b50a58c5b/348c5c84-b6af-4859-bb9d-1d01009c795b',
'info_dict': {
'id': '348c5c84-b6af-4859-bb9d-1d01009c795b',
'ext': 'mp4',
'title': 'pl1-edyta-rubas-211124.mp4',
'uploader_id': 'f9823fc6-faba-b98f-0d00-4a7b50a58c5b',
'thumbnail': 'https://universe-files.dacast.com/4d0bd042-a536-752d-fc34-ad2fa44bbcbb.png',
},
}]
_WEBPAGE_TESTS = [{
'url': 'https://www.dacast.com/support/knowledgebase/how-can-i-embed-a-video-on-my-website/',
@@ -74,6 +84,15 @@ class DacastVODIE(DacastBaseIE):
'params': {'skip_download': 'm3u8'},
}]

@functools.cached_property
def _usp_signing_secret(self):
player_js = self._download_webpage(
'https://player.dacast.com/js/player.js', None, 'Downloading player JS')
# Rotates every so often, but hardcode a fallback in case of JS change/breakage before rotation
return self._search_regex(
r'\bUSP_SIGNING_SECRET\s*=\s*(["\'])(?P<secret>(?:(?!\1).)+)', player_js,
'usp signing secret', group='secret', fatal=False) or 'odnInCGqhvtyRTtIiddxtuRtawYYICZP'

def _real_extract(self, url):
user_id, video_id = self._match_valid_url(url).group('user_id', 'id')
query = {'contentId': f'{user_id}-vod-{video_id}', 'provider': 'universe'}
@@ -94,10 +113,10 @@ class DacastVODIE(DacastBaseIE):
if 'DRM_EXT' in hls_url:
self.report_drm(video_id)
elif '/uspaes/' in hls_url:
# From https://player.dacast.com/js/player.js
# Ref: https://player.dacast.com/js/player.js
ts = int(time.time())
signature = hashlib.sha1(
f'{10413792000 - ts}{ts}YfaKtquEEpDeusCKbvYszIEZnWmBcSvw').digest().hex()
f'{10413792000 - ts}{ts}{self._usp_signing_secret}'.encode()).digest().hex()
hls_aes['uri'] = f'https://keys.dacast.com/uspaes/{video_id}.key?s={signature}&ts={ts}'

for retry in self.RetryManager():
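
Note: the hunk above derives the AES key URI from a signing secret scraped out of player.js instead of a hardcoded value. A minimal standalone sketch of that signing step, assuming only the constants visible in this diff (the secret rotates, so the fallback value here is illustrative):

import hashlib
import time

def dacast_key_url(video_id, usp_signing_secret='odnInCGqhvtyRTtIiddxtuRtawYYICZP'):
    # Signature is sha1("<10413792000 - ts><ts><secret>"), hex-encoded,
    # mirroring the _usp_signing_secret usage in the hunk above
    ts = int(time.time())
    signature = hashlib.sha1(
        f'{10413792000 - ts}{ts}{usp_signing_secret}'.encode()).hexdigest()
    return f'https://keys.dacast.com/uspaes/{video_id}.key?s={signature}&ts={ts}'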

@@ -261,6 +261,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'tags': [],
'view_count': int,
'like_count': int,
'thumbnail': r're:https://\w+.dmcdn.net/v/WnEY61cmvMxt2Fi6d/x1080',
},
}, {
# https://geo.dailymotion.com/player/xf7zn.html?playlist=x7wdsj
@@ -288,6 +289,25 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'description': 'À bord du « véloto », l’alternative à la voiture pour la campagne',
'tags': ['biclou', 'vélo', 'véloto', 'campagne', 'voiture', 'environnement', 'véhicules intermédiaires'],
},
}, {
# https://geo.dailymotion.com/player/xry80.html?video=x8vu47w
'url': 'https://www.metatube.com/en/videos/546765/This-frogs-decorates-Christmas-tree/',
'info_dict': {
'id': 'x8vu47w',
'ext': 'mp4',
'like_count': int,
'uploader': 'Metatube',
'thumbnail': r're:https://\w+.dmcdn.net/v/W1G_S1coGSFTfkTeR/x1080',
'upload_date': '20240326',
'view_count': int,
'timestamp': 1711496732,
'age_limit': 0,
'uploader_id': 'x2xpy74',
'title': 'Está lindas ranitas ponen su arbolito',
'duration': 28,
'description': 'Que lindura',
'tags': [],
},
}]
_GEO_BYPASS = False
_COMMON_MEDIA_FIELDS = '''description
@@ -302,7 +322,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
yield from super()._extract_embed_urls(url, webpage)
for mobj in re.finditer(
r'(?s)DM\.player\([^,]+,\s*{.*?video[\'"]?\s*:\s*["\']?(?P<id>[0-9a-zA-Z]+).+?}\s*\);', webpage):
yield from 'https://www.dailymotion.com/embed/video/' + mobj.group('id')
yield 'https://www.dailymotion.com/embed/video/' + mobj.group('id')
for mobj in re.finditer(
r'(?s)<script [^>]*\bsrc=(["\'])(?:https?:)?//[\w-]+\.dailymotion\.com/player/(?:(?!\1).)+\1[^>]*>', webpage):
attrs = extract_attributes(mobj.group(0))

@@ -1,7 +1,10 @@
import time

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
jwt_decode_hs256,
parse_codecs,
try_get,
url_or_none,
@@ -13,9 +16,6 @@ from ..utils.traversal import traverse_obj
class DigitalConcertHallIE(InfoExtractor):
IE_DESC = 'DigitalConcertHall extractor'
_VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/(?P<type>film|concert|work)/(?P<id>[0-9]+)-?(?P<part>[0-9]+)?'
_OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.5 Safari/605.1.15'
_ACCESS_TOKEN = None
_NETRC_MACHINE = 'digitalconcerthall'
_TESTS = [{
'note': 'Playlist with only one video',
@@ -69,59 +69,157 @@ class DigitalConcertHallIE(InfoExtractor):
'params': {'skip_download': 'm3u8'},
'playlist_count': 1,
}]
_LOGIN_HINT = ('Use --username token --password ACCESS_TOKEN where ACCESS_TOKEN '
'is the "access_token_production" from your browser local storage')
_REFRESH_HINT = 'or else use a "refresh_token" with --username refresh --password REFRESH_TOKEN'
_OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token'
_CLIENT_ID = 'dch.webapp'
_CLIENT_SECRET = '2ySLN+2Fwb'
_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.5 Safari/605.1.15'
_OAUTH_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Origin': 'https://www.digitalconcerthall.com',
'Referer': 'https://www.digitalconcerthall.com/',
'User-Agent': _USER_AGENT,
}
_access_token = None
_access_token_expiry = 0
_refresh_token = None

def _perform_login(self, username, password):
login_token = self._download_json(
self._OAUTH_URL,
None, 'Obtaining token', errnote='Unable to obtain token', data=urlencode_postdata({
@property
def _access_token_is_expired(self):
return self._access_token_expiry - 30 <= int(time.time())

def _set_access_token(self, value):
self._access_token = value
self._access_token_expiry = traverse_obj(value, ({jwt_decode_hs256}, 'exp', {int})) or 0

def _cache_tokens(self, /):
self.cache.store(self._NETRC_MACHINE, 'tokens', {
'access_token': self._access_token,
'refresh_token': self._refresh_token,
})

def _fetch_new_tokens(self, invalidate=False):
if invalidate:
self.report_warning('Access token has been invalidated')
self._set_access_token(None)

if not self._access_token_is_expired:
return

if not self._refresh_token:
self._set_access_token(None)
self._cache_tokens()
raise ExtractorError(
'Access token has expired or been invalidated. '
'Get a new "access_token_production" value from your browser '
f'and try again, {self._REFRESH_HINT}', expected=True)

# If we only have a refresh token, we need a temporary "initial token" for the refresh flow
bearer_token = self._access_token or self._download_json(
self._OAUTH_URL, None, 'Obtaining initial token', 'Unable to obtain initial token',
data=urlencode_postdata({
'affiliate': 'none',
'grant_type': 'device',
'device_vendor': 'unknown',
# device_model 'Safari' gets split streams of 4K/HEVC video and lossless/FLAC audio
'device_model': 'unknown' if self._configuration_arg('prefer_combined_hls') else 'Safari',
'app_id': 'dch.webapp',
# device_model 'Safari' gets split streams of 4K/HEVC video and lossless/FLAC audio,
# but this is no longer effective since actual login is not possible anymore
'device_model': 'unknown',
'app_id': self._CLIENT_ID,
'app_distributor': 'berlinphil',
'app_version': '1.84.0',
'client_secret': '2ySLN+2Fwb',
}), headers={
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'User-Agent': self._USER_AGENT,
})['access_token']
'app_version': '1.95.0',
'client_secret': self._CLIENT_SECRET,
}), headers=self._OAUTH_HEADERS)['access_token']

try:
login_response = self._download_json(
self._OAUTH_URL,
None, note='Logging in', errnote='Unable to login', data=urlencode_postdata({
'grant_type': 'password',
'username': username,
'password': password,
response = self._download_json(
self._OAUTH_URL, None, 'Refreshing token', 'Unable to refresh token',
data=urlencode_postdata({
'grant_type': 'refresh_token',
'refresh_token': self._refresh_token,
'client_id': self._CLIENT_ID,
'client_secret': self._CLIENT_SECRET,
}), headers={
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Referer': 'https://www.digitalconcerthall.com',
'Authorization': f'Bearer {login_token}',
'User-Agent': self._USER_AGENT,
**self._OAUTH_HEADERS,
'Authorization': f'Bearer {bearer_token}',
})
except ExtractorError as error:
if isinstance(error.cause, HTTPError) and error.cause.status == 401:
raise ExtractorError('Invalid username or password', expected=True)
except ExtractorError as e:
if isinstance(e.cause, HTTPError) and e.cause.status == 401:
self._set_access_token(None)
self._refresh_token = None
self._cache_tokens()
raise ExtractorError('Your tokens have been invalidated', expected=True)
raise
self._ACCESS_TOKEN = login_response['access_token']

self._set_access_token(response['access_token'])
if refresh_token := traverse_obj(response, ('refresh_token', {str})):
self.write_debug('New refresh token granted')
self._refresh_token = refresh_token
self._cache_tokens()

def _perform_login(self, username, password):
self.report_login()

if username == 'refresh':
self._refresh_token = password
self._fetch_new_tokens()

if username == 'token':
if not traverse_obj(password, {jwt_decode_hs256}):
raise ExtractorError(
f'The access token passed to yt-dlp is not valid. {self._LOGIN_HINT}', expected=True)
self._set_access_token(password)
self._cache_tokens()

if username in ('refresh', 'token'):
if self.get_param('cachedir') is not False:
token_type = 'access' if username == 'token' else 'refresh'
self.to_screen(f'Your {token_type} token has been cached to disk. To use the cached '
'token next time, pass --username cache along with any password')
return

if username != 'cache':
raise ExtractorError(
'Login with username and password is no longer supported '
f'for this site. {self._LOGIN_HINT}, {self._REFRESH_HINT}', expected=True)

# Try cached access_token
cached_tokens = self.cache.load(self._NETRC_MACHINE, 'tokens', default={})
self._set_access_token(cached_tokens.get('access_token'))
self._refresh_token = cached_tokens.get('refresh_token')
if not self._access_token_is_expired:
return

# Try cached refresh_token
self._fetch_new_tokens(invalidate=True)

def _real_initialize(self):
if not self._ACCESS_TOKEN:
self.raise_login_required(method='password')
if not self._access_token:
self.raise_login_required(
'All content on this site is only available for registered users. '
f'{self._LOGIN_HINT}, {self._REFRESH_HINT}', method=None)

def _entries(self, items, language, type_, **kwargs):
for item in items:
video_id = item['id']
stream_info = self._download_json(
self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={
'Accept': 'application/json',
'Authorization': f'Bearer {self._ACCESS_TOKEN}',
'Accept-Language': language,
'User-Agent': self._USER_AGENT,
})

for should_retry in (True, False):
self._fetch_new_tokens(invalidate=not should_retry)
try:
stream_info = self._download_json(
self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={
'Accept': 'application/json',
'Authorization': f'Bearer {self._access_token}',
'Accept-Language': language,
'User-Agent': self._USER_AGENT,
})
break
except ExtractorError as error:
if should_retry and isinstance(error.cause, HTTPError) and error.cause.status == 401:
continue
raise

formats = []
for m3u8_url in traverse_obj(stream_info, ('channel', ..., 'stream', ..., 'url', {url_or_none})):
@@ -157,7 +255,6 @@ class DigitalConcertHallIE(InfoExtractor):
'Accept': 'application/json',
'Accept-Language': language,
'User-Agent': self._USER_AGENT,
'Authorization': f'Bearer {self._ACCESS_TOKEN}',
})
videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...))
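
Note: the retry loop above leans on _access_token_is_expired, which reads the exp claim out of the cached JWT with a 30-second leeway. A rough standalone equivalent, assuming yt-dlp's jwt_decode_hs256 helper (it only decodes the payload, it does not verify the signature):

import time

from yt_dlp.utils import jwt_decode_hs256

def is_expired(access_token, leeway=30):
    try:
        exp = int(jwt_decode_hs256(access_token).get('exp') or 0)
    except Exception:  # malformed or missing token counts as expired
        exp = 0
    return exp - leeway <= int(time.time())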

@@ -48,32 +48,30 @@ class DropboxIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
fn = urllib.parse.unquote(url_basename(url))
title = os.path.splitext(fn)[0]
password = self.get_param('videopassword')
content_id = None

for part in self._yield_decoded_parts(webpage):
if '/sm/password' in part:
webpage = self._download_webpage(
update_url('https://www.dropbox.com/sm/password', query=part.partition('?')[2]), video_id)
content_id = self._search_regex(r'content_id=([\w.+=/-]+)', part, 'content ID')
break

if (self._og_search_title(webpage, default=None) == 'Dropbox - Password Required'
or 'Enter the password for this link' in webpage):
if password:
response = self._download_json(
'https://www.dropbox.com/sm/auth', video_id, 'POSTing video password',
headers={'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'},
data=urlencode_postdata({
'is_xhr': 'true',
't': self._get_cookies('https://www.dropbox.com')['t'].value,
'content_id': self._search_regex(r'content_id=([\w.+=/-]+)["\']', webpage, 'content id'),
'password': password,
'url': url,
}))

if response.get('status') != 'authed':
raise ExtractorError('Invalid password', expected=True)
elif not self._get_cookies('https://dropbox.com').get('sm_auth'):
if content_id:
password = self.get_param('videopassword')
if not password:
raise ExtractorError('Password protected video, use --video-password <password>', expected=True)

response = self._download_json(
'https://www.dropbox.com/sm/auth', video_id, 'POSTing video password',
data=urlencode_postdata({
'is_xhr': 'true',
't': self._get_cookies('https://www.dropbox.com')['t'].value,
'content_id': content_id,
'password': password,
'url': update_url(url, scheme='', netloc=''),
}))
if response.get('status') != 'authed':
raise ExtractorError('Invalid password', expected=True)

webpage = self._download_webpage(url, video_id)

formats, subtitles = [], {}
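
Note: in the rewritten password flow above, the /sm/auth call is only made when a content_id was found in the decoded page parts. A hedged sketch of the request body it builds (the 't' value comes from Dropbox's own cookie; treat this as illustrative, not a drop-in helper):

from urllib.parse import urlencode

from yt_dlp.utils import update_url

def dropbox_auth_payload(url, content_id, password, t_cookie):
    # Mirrors the urlencode_postdata() call in the hunk above
    return urlencode({
        'is_xhr': 'true',
        't': t_cookie,
        'content_id': content_id,
        'password': password,
        # scheme and netloc are stripped, matching update_url(url, scheme='', netloc='')
        'url': update_url(url, scheme='', netloc=''),
    }).encode()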

@@ -5,15 +5,16 @@ from ..utils import (
get_element_text_and_html_by_tag,
int_or_none,
join_nonempty,
parse_qs,
str_or_none,
try_call,
unified_timestamp,
)
from ..utils.traversal import traverse_obj
from ..utils.traversal import traverse_obj, value


class DuoplayIE(InfoExtractor):
_VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
_VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)(?:[/?#]|$)'
_TESTS = [{
'note': 'Siberi võmm S02E12',
'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',
@@ -34,15 +35,16 @@ class DuoplayIE(InfoExtractor):
'episode_number': 12,
'episode_id': '24',
},
'skip': 'No video found',
}, {
'note': 'Empty title',
'url': 'https://duoplay.ee/17/uhikarotid?ep=14',
'md5': '6aca68be71112314738dd17cced7f8bf',
'md5': 'cba9f5dabf2582b224d80ac44fb80e47',
'info_dict': {
'id': '17_14',
'ext': 'mp4',
'title': 'Ühikarotid',
'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
'title': 'Episode 14',
'thumbnail': r're:https?://.+\.jpg',
'description': 'md5:4719b418e058c209def41d48b601276e',
'upload_date': '20100916',
'timestamp': 1284661800,
@@ -52,6 +54,8 @@ class DuoplayIE(InfoExtractor):
'season_number': 2,
'episode_id': '14',
'release_year': 2010,
'episode': 'Episode 14',
'episode_number': 14,
},
}, {
'note': 'Movie without expiry',
@@ -68,10 +72,32 @@ class DuoplayIE(InfoExtractor):
'timestamp': 1671054000,
'release_year': 2018,
},
'skip': 'No video found',
}, {
'note': 'Episode url without show name',
'url': 'https://duoplay.ee/9644?ep=185',
'md5': '63f324b4fe2dbd8194dca16a6d52184a',
'info_dict': {
'id': '9644_185',
'ext': 'mp4',
'title': 'Episode 185',
'thumbnail': r're:https?://.+\.jpg',
'description': 'md5:ed25ba4e9e5d54bc291a4a0cdd241467',
'upload_date': '20241120',
'timestamp': 1732077000,
'episode': 'Episode 63',
'episode_id': '185',
'episode_number': 63,
'season': 'Season 2',
'season_number': 2,
'series': 'Telehommik',
'series_id': '9644',
},
}]

def _real_extract(self, url):
telecast_id, episode = self._match_valid_url(url).group('id', 'ep')
telecast_id = self._match_id(url)
episode = traverse_obj(parse_qs(url), ('ep', 0, {int_or_none}, {str_or_none}))
video_id = join_nonempty(telecast_id, episode, delim='_')
webpage = self._download_webpage(url, video_id)
video_player = try_call(lambda: extract_attributes(
@@ -79,25 +105,33 @@ class DuoplayIE(InfoExtractor):
if not video_player or not video_player.get('manifest-url'):
raise ExtractorError('No video found', expected=True)

manifest_url = video_player['manifest-url']
session_token = self._download_json(
'https://sts.postimees.ee/session/register', video_id, 'Registering session',
'Unable to register session', headers={
'Accept': 'application/json',
'X-Original-URI': manifest_url,
})['session']

episode_attr = self._parse_json(video_player.get(':episode') or '', video_id, fatal=False) or {}

return {
'id': video_id,
'formats': self._extract_m3u8_formats(video_player['manifest-url'], video_id, 'mp4'),
'formats': self._extract_m3u8_formats(manifest_url, video_id, 'mp4', query={'s': session_token}),
**traverse_obj(episode_attr, {
'title': 'title',
'description': 'synopsis',
'title': ('title', {str}),
'description': ('synopsis', {str}),
'thumbnail': ('images', 'original'),
'timestamp': ('airtime', {lambda x: unified_timestamp(x + ' +0200')}),
'cast': ('cast', {lambda x: x.split(', ')}),
'cast': ('cast', filter, {lambda x: x.split(', ')}),
'release_year': ('year', {int_or_none}),
}),
**(traverse_obj(episode_attr, {
'title': (None, ('subtitle', ('episode_nr', {lambda x: f'Episode {x}' if x else None}))),
'series': 'title',
'title': (None, (('subtitle', {str}, filter), {value(f'Episode {episode}' if episode else None)})),
'series': ('title', {str}),
'series_id': ('telecast_id', {str_or_none}),
'season_number': ('season_id', {int_or_none}),
'episode': 'subtitle',
'episode': ('subtitle', {str}, filter),
'episode_number': ('episode_nr', {int_or_none}),
'episode_id': ('episode_id', {str_or_none}),
}, get_all=False) if episode_attr.get('category') != 'movies' else {}),
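
Note: the key functional change above is the session-registration step: the manifest is only playable once a token from sts.postimees.ee is appended as the s query parameter. A minimal sketch of that exchange, assuming only the endpoint and header names shown in this diff (the real request may need additional headers or cookies):

import json
import urllib.parse
import urllib.request

def signed_manifest_url(manifest_url):
    # The manifest URL itself is passed via X-Original-URI, as in the hunk above
    req = urllib.request.Request('https://sts.postimees.ee/session/register', headers={
        'Accept': 'application/json',
        'X-Original-URI': manifest_url,
    })
    with urllib.request.urlopen(req) as resp:
        session = json.load(resp)['session']
    # Equivalent of passing query={'s': session} to _extract_m3u8_formats
    sep = '&' if urllib.parse.urlparse(manifest_url).query else '?'
    return f'{manifest_url}{sep}s={session}'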

@@ -50,7 +50,7 @@ class FacebookIE(InfoExtractor):
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
events/(?:[^/]+/)?|
groups/[^/]+/(?:permalink|posts)/|
groups/[^/]+/(?:permalink|posts)/(?:[\da-f]+/)?|
watchparty/
)|
facebook:
@@ -410,6 +410,9 @@ class FacebookIE(InfoExtractor):
'uploader': 'Comitato Liberi Pensatori',
'uploader_id': '100065709540881',
},
}, {
'url': 'https://www.facebook.com/groups/1513990329015294/posts/d41d8cd9/2013209885760000/?app=fbl',
'only_matching': True,
}]
_SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
_api_config = {
@@ -563,13 +566,13 @@ class FacebookIE(InfoExtractor):
return extract_video_data(try_get(
js_data, lambda x: x['jsmods']['instances'], list) or [])

def extract_dash_manifest(video, formats):
def extract_dash_manifest(vid_data, formats, mpd_url=None):
dash_manifest = traverse_obj(
video, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', expected_type=str)
vid_data, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', 'manifest_xml', expected_type=str)
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
mpd_url=url_or_none(video.get('dash_manifest_url'))))
mpd_url=url_or_none(vid_data.get('dash_manifest_url')) or mpd_url))

def process_formats(info):
# Downloads with browser's User-Agent are rate limited. Working around
@@ -619,9 +622,12 @@ class FacebookIE(InfoExtractor):
video = video['creation_story']
video['owner'] = traverse_obj(video, ('short_form_video_context', 'video_owner'))
video.update(reel_info)
fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video

formats = []
q = qualities(['sd', 'hd'])

# Legacy formats extraction
fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
for key, format_id in (('playable_url', 'sd'), ('playable_url_quality_hd', 'hd'),
('playable_url_dash', ''), ('browser_native_hd_url', 'hd'),
('browser_native_sd_url', 'sd')):
@@ -629,7 +635,7 @@ class FacebookIE(InfoExtractor):
if not playable_url:
continue
if determine_ext(playable_url) == 'mpd':
formats.extend(self._extract_mpd_formats(playable_url, video_id))
formats.extend(self._extract_mpd_formats(playable_url, video_id, fatal=False))
else:
formats.append({
'format_id': format_id,
@@ -638,6 +644,28 @@ class FacebookIE(InfoExtractor):
'url': playable_url,
})
extract_dash_manifest(fmt_data, formats)

# New videoDeliveryResponse formats extraction
fmt_data = traverse_obj(video, ('videoDeliveryResponseFragment', 'videoDeliveryResponseResult'))
mpd_urls = traverse_obj(fmt_data, ('dash_manifest_urls', ..., 'manifest_url', {url_or_none}))
dash_manifests = traverse_obj(fmt_data, ('dash_manifests', lambda _, v: v['manifest_xml']))
for idx, dash_manifest in enumerate(dash_manifests):
extract_dash_manifest(dash_manifest, formats, mpd_url=traverse_obj(mpd_urls, idx))
if not dash_manifests:
# Only extract from MPD URLs if the manifests are not already provided
for mpd_url in mpd_urls:
formats.extend(self._extract_mpd_formats(mpd_url, video_id, fatal=False))
for prog_fmt in traverse_obj(fmt_data, ('progressive_urls', lambda _, v: v['progressive_url'])):
format_id = traverse_obj(prog_fmt, ('metadata', 'quality', {str.lower}))
formats.append({
'format_id': format_id,
# sd, hd formats w/o resolution info should be deprioritized below DASH
'quality': q(format_id) - 3,
'url': prog_fmt['progressive_url'],
})
for m3u8_url in traverse_obj(fmt_data, ('hls_playlist_urls', ..., 'hls_playlist_url', {url_or_none})):
formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', fatal=False, m3u8_id='hls'))

if not formats:
# Do not append false positive entry w/o any formats
return
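
Note: the new videoDeliveryResponse block above prefers inline DASH manifests and only falls back to fetching manifest URLs when none are embedded. A small sketch of the shape that traversal expects; the field names come straight from the hunk, but the sample dict is fabricated for illustration:

from yt_dlp.utils import url_or_none
from yt_dlp.utils.traversal import traverse_obj

# Fabricated example of the structure the new extraction path walks
video = {'videoDeliveryResponseFragment': {'videoDeliveryResponseResult': {
    'dash_manifest_urls': [{'manifest_url': 'https://example.com/video.mpd'}],
    'dash_manifests': [{'manifest_xml': '<MPD/>'}],
    'progressive_urls': [{'progressive_url': 'https://example.com/video.mp4',
                          'metadata': {'quality': 'HD'}}],
}}}

fmt_data = traverse_obj(video, ('videoDeliveryResponseFragment', 'videoDeliveryResponseResult'))
mpd_urls = traverse_obj(fmt_data, ('dash_manifest_urls', ..., 'manifest_url', {url_or_none}))
inline_manifests = traverse_obj(fmt_data, ('dash_manifests', lambda _, v: v['manifest_xml']))
print(mpd_urls, len(inline_manifests))  # ['https://example.com/video.mpd'] 1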

@@ -193,9 +193,9 @@ class FunimationIE(FunimationBaseIE):

for lang, version, fmt in self._get_experiences(episode):
experience_id = str(fmt['experienceId'])
if (only_initial_experience and experience_id != initial_experience_id
or requested_languages and lang.lower() not in requested_languages
or requested_versions and version.lower() not in requested_versions):
if ((only_initial_experience and experience_id != initial_experience_id)
or (requested_languages and lang.lower() not in requested_languages)
or (requested_versions and version.lower() not in requested_versions)):
continue
thumbnails.append({'url': fmt.get('poster')})
duration = max(duration, fmt.get('duration', 0))

@@ -5,56 +5,63 @@ import hashlib
import hmac
import json
import os
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
remove_end,
traverse_obj,
unescapeHTML,
)


class GoPlayIE(InfoExtractor):
_VALID_URL = r'https?://(www\.)?goplay\.be/video/([^/]+/[^/]+/|)(?P<display_id>[^/#]+)'
_VALID_URL = r'https?://(www\.)?goplay\.be/video/([^/?#]+/[^/?#]+/|)(?P<id>[^/#]+)'

_NETRC_MACHINE = 'goplay'

_TESTS = [{
'url': 'https://www.goplay.be/video/de-container-cup/de-container-cup-s3/de-container-cup-s3-aflevering-2#autoplay',
'url': 'https://www.goplay.be/video/de-slimste-mens-ter-wereld/de-slimste-mens-ter-wereld-s22/de-slimste-mens-ter-wereld-s22-aflevering-1',
'info_dict': {
'id': '9c4214b8-e55d-4e4b-a446-f015f6c6f811',
'id': '2baa4560-87a0-421b-bffc-359914e3c387',
'ext': 'mp4',
'title': 'S3 - Aflevering 2',
'series': 'De Container Cup',
'season': 'Season 3',
'season_number': 3,
'episode': 'Episode 2',
'episode_number': 2,
'title': 'S22 - Aflevering 1',
'description': r're:In aflevering 1 nemen Daan Alferink, Tess Elst en Xander De Rycke .{66}',
'series': 'De Slimste Mens ter Wereld',
'episode': 'Episode 1',
'season_number': 22,
'episode_number': 1,
'season': 'Season 22',
},
'params': {'skip_download': True},
'skip': 'This video is only available for registered users',
}, {
'url': 'https://www.goplay.be/video/a-family-for-thr-holidays-s1-aflevering-1#autoplay',
'url': 'https://www.goplay.be/video/1917',
'info_dict': {
'id': '74e3ed07-748c-49e4-85a0-393a93337dbf',
'id': '40cac41d-8d29-4ef5-aa11-75047b9f0907',
'ext': 'mp4',
'title': 'A Family for the Holidays',
'title': '1917',
'description': r're:Op het hoogtepunt van de Eerste Wereldoorlog krijgen twee jonge .{94}',
},
'params': {'skip_download': True},
'skip': 'This video is only available for registered users',
}, {
'url': 'https://www.goplay.be/video/de-mol/de-mol-s11/de-mol-s11-aflevering-1#autoplay',
'info_dict': {
'id': '03eb8f2f-153e-41cb-9805-0d3a29dab656',
'id': 'ecb79672-92b9-4cd9-a0d7-e2f0250681ee',
'ext': 'mp4',
'title': 'S11 - Aflevering 1',
'description': r're:Tien kandidaten beginnen aan hun verovering van Amerika en ontmoeten .{102}',
'episode': 'Episode 1',
'series': 'De Mol',
'season_number': 11,
'episode_number': 1,
'season': 'Season 11',
},
'params': {
'skip_download': True,
},
'params': {'skip_download': True},
'skip': 'This video is only available for registered users',
}]

@@ -69,27 +76,42 @@ class GoPlayIE(InfoExtractor):
if not self._id_token:
raise self.raise_login_required(method='password')

def _real_extract(self, url):
url, display_id = self._match_valid_url(url).group(0, 'display_id')
webpage = self._download_webpage(url, display_id)
video_data_json = self._html_search_regex(r'<div\s+data-hero="([^"]+)"', webpage, 'video_data')
video_data = self._parse_json(unescapeHTML(video_data_json), display_id).get('data')
def _find_json(self, s):
return self._search_json(
r'\w+\s*:\s*', s, 'next js data', None, contains_pattern=r'\[(?s:.+)\]', default=None)

movie = video_data.get('movie')
if movie:
video_id = movie['videoUuid']
info_dict = {
'title': movie.get('title'),
}
else:
episode = traverse_obj(video_data, ('playlists', ..., 'episodes', lambda _, v: v['pageInfo']['url'] == url), get_all=False)
video_id = episode['videoUuid']
info_dict = {
'title': episode.get('episodeTitle'),
'series': traverse_obj(episode, ('program', 'title')),
'season_number': episode.get('seasonNumber'),
'episode_number': episode.get('episodeNumber'),
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)

nextjs_data = traverse_obj(
re.findall(r'<script[^>]*>\s*self\.__next_f\.push\(\s*(\[.+?\])\s*\);?\s*</script>', webpage),
(..., {js_to_json}, {json.loads}, ..., {self._find_json}, ...))
meta = traverse_obj(nextjs_data, (
..., lambda _, v: v['meta']['path'] == urllib.parse.urlparse(url).path, 'meta', any))

video_id = meta['uuid']
info_dict = traverse_obj(meta, {
'title': ('title', {str}),
'description': ('description', {str.strip}),
})

if traverse_obj(meta, ('program', 'subtype')) != 'movie':
for season_data in traverse_obj(nextjs_data, (..., 'children', ..., 'playlists', ...)):
episode_data = traverse_obj(
season_data, ('videos', lambda _, v: v['videoId'] == video_id, any))
if not episode_data:
continue

episode_title = traverse_obj(
episode_data, 'contextualTitle', 'episodeTitle', expected_type=str)
info_dict.update({
'title': episode_title or info_dict.get('title'),
'series': remove_end(info_dict.get('title'), f' - {episode_title}'),
'season_number': traverse_obj(season_data, ('season', {int_or_none})),
'episode_number': traverse_obj(episode_data, ('episodeNumber', {int_or_none})),
})
break

api = self._download_json(
f'https://api.goplay.be/web/v1/videos/long-form/{video_id}',

@@ -254,7 +254,7 @@ class InstagramIOSIE(InfoExtractor):


class InstagramIE(InstagramBaseIE):
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/[^/]+)?/(?:p|tv|reels?(?!/audio/))/(?P<id>[^/?#&]+))'
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/(?!share/)[^/?#]+)?/(?:p|tv|reels?(?!/audio/))/(?P<id>[^/?#&]+))'
_EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1']
_TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',

yt_dlp/extractor/kenh14.py (new file, 160 lines)
@@ -0,0 +1,160 @@
from .common import InfoExtractor
from ..utils import (
clean_html,
extract_attributes,
get_element_by_class,
get_element_html_by_attribute,
get_elements_html_by_class,
int_or_none,
parse_duration,
parse_iso8601,
remove_start,
strip_or_none,
unescapeHTML,
update_url,
url_or_none,
)
from ..utils.traversal import traverse_obj


class Kenh14VideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.kenh14\.vn/(?:video/)?[\w-]+-(?P<id>[0-9]+)\.chn'
_TESTS = [{
'url': 'https://video.kenh14.vn/video/mo-hop-iphone-14-pro-max-nguon-unbox-therapy-316173.chn',
'md5': '1ed67f9c3a1e74acf15db69590cf6210',
'info_dict': {
'id': '316173',
'ext': 'mp4',
'title': 'Video mở hộp iPhone 14 Pro Max (Nguồn: Unbox Therapy)',
'description': 'Video mở hộp iPhone 14 Pro MaxVideo mở hộp iPhone 14 Pro Max (Nguồn: Unbox Therapy)',
'thumbnail': r're:^https?://videothumbs\.mediacdn\.vn/.*\.jpg$',
'tags': [],
'uploader': 'Unbox Therapy',
'upload_date': '20220517',
'view_count': int,
'duration': 722.86,
'timestamp': 1652764468,
},
}, {
'url': 'https://video.kenh14.vn/video-316174.chn',
'md5': '2b41877d2afaf4a3f487ceda8e5c7cbd',
'info_dict': {
'id': '316174',
'ext': 'mp4',
'title': 'Khoảnh khắc VĐV nằm gục khóc sau chiến thắng: 7 năm trời Việt Nam mới có HCV kiếm chém nữ, chỉ có 8 tháng để khổ luyện trước khi lên sàn đấu',
'description': 'md5:de86aa22e143e2b277bce8ec9c6f17dc',
'thumbnail': r're:^https?://videothumbs\.mediacdn\.vn/.*\.jpg$',
'tags': [],
'upload_date': '20220517',
'view_count': int,
'duration': 70.04,
'timestamp': 1652766021,
},
}, {
'url': 'https://video.kenh14.vn/0-344740.chn',
'md5': 'b843495d5e728142c8870c09b46df2a9',
'info_dict': {
'id': '344740',
'ext': 'mov',
'title': 'Kỳ Duyên đầy căng thẳng trong buổi ra quân đi Miss Universe, nghi thức tuyên thuệ lần đầu xuất hiện gây nhiều tranh cãi',
'description': 'md5:2a2dbb4a7397169fb21ee68f09160497',
'thumbnail': r're:^https?://kenh14cdn\.com/.*\.jpg$',
'tags': ['kỳ duyên', 'Kỳ Duyên tuyên thuệ', 'miss universe'],
'uploader': 'Quang Vũ',
'upload_date': '20241024',
'view_count': int,
'duration': 198.88,
'timestamp': 1729741590,
},
}]

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)

attrs = extract_attributes(get_element_html_by_attribute('type', 'VideoStream', webpage) or '')
direct_url = attrs['data-vid']

metadata = self._download_json(
'https://api.kinghub.vn/video/api/v1/detailVideoByGet?FileName={}'.format(
remove_start(direct_url, 'kenh14cdn.com/')), video_id, fatal=False)

formats = [{'url': f'https://{direct_url}', 'format_id': 'http', 'quality': 1}]
subtitles = {}
video_data = self._download_json(
f'https://{direct_url}.json', video_id, note='Downloading video data', fatal=False)
if hls_url := traverse_obj(video_data, ('hls', {url_or_none})):
fmts, subs = self._extract_m3u8_formats_and_subtitles(
hls_url, video_id, m3u8_id='hls', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
if dash_url := traverse_obj(video_data, ('mpd', {url_or_none})):
fmts, subs = self._extract_mpd_formats_and_subtitles(
dash_url, video_id, mpd_id='dash', fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)

return {
**traverse_obj(metadata, {
'duration': ('duration', {parse_duration}),
'uploader': ('author', {strip_or_none}),
'timestamp': ('uploadtime', {parse_iso8601(delimiter=' ')}),
'view_count': ('views', {int_or_none}),
}),
'id': video_id,
'title': (
traverse_obj(metadata, ('title', {strip_or_none}))
or clean_html(self._og_search_title(webpage))
or clean_html(get_element_by_class('vdbw-title', webpage))),
'formats': formats,
'subtitles': subtitles,
'description': (
clean_html(self._og_search_description(webpage))
or clean_html(get_element_by_class('vdbw-sapo', webpage))),
'thumbnail': (self._og_search_thumbnail(webpage) or attrs.get('data-thumb')),
'tags': traverse_obj(self._html_search_meta('keywords', webpage), (
{lambda x: x.split(';')}, ..., filter)),
}


class Kenh14PlaylistIE(InfoExtractor):
_VALID_URL = r'https?://video\.kenh14\.vn/playlist/[\w-]+-(?P<id>[0-9]+)\.chn'
_TESTS = [{
'url': 'https://video.kenh14.vn/playlist/tran-tinh-naked-love-mua-2-71.chn',
'info_dict': {
'id': '71',
'title': 'Trần Tình (Naked love) mùa 2',
'description': 'md5:e9522339304956dea931722dd72eddb2',
'thumbnail': r're:^https?://kenh14cdn\.com/.*\.png$',
},
'playlist_count': 9,
}, {
'url': 'https://video.kenh14.vn/playlist/0-72.chn',
'info_dict': {
'id': '72',
'title': 'Lau Lại Đầu Từ',
'description': 'Cùng xem xưa và nay có gì khác biệt nhé!',
'thumbnail': r're:^https?://kenh14cdn\.com/.*\.png$',
},
'playlist_count': 6,
}]

def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)

category_detail = get_element_by_class('category-detail', webpage) or ''
embed_info = traverse_obj(
self._yield_json_ld(webpage, playlist_id),
(lambda _, v: v['name'] and v['alternateName'], any)) or {}

return self.playlist_from_matches(
get_elements_html_by_class('video-item', webpage), playlist_id,
(clean_html(get_element_by_class('name', category_detail)) or unescapeHTML(embed_info.get('name'))),
getter=lambda x: 'https://video.kenh14.vn/video/video-{}.chn'.format(extract_attributes(x)['data-id']),
ie=Kenh14VideoIE, playlist_description=(
clean_html(get_element_by_class('description', category_detail))
or unescapeHTML(embed_info.get('alternateName'))),
thumbnail=traverse_obj(
self._og_search_thumbnail(webpage),
({url_or_none}, {update_url(query=None)})))
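
Note: in Kenh14VideoIE above, the data-vid attribute doubles as both the direct download host path and the key into the kinghub metadata API. A hedged sketch of how the two URLs relate; the endpoint is taken from the new file, while the sample data-vid value is made up:

from yt_dlp.utils import remove_start

# Hypothetical data-vid attribute from the VideoStream element
direct_url = 'kenh14cdn.com/2022/5/17/example-clip.mp4'

video_url = f'https://{direct_url}'
metadata_url = ('https://api.kinghub.vn/video/api/v1/detailVideoByGet?FileName='
                + remove_start(direct_url, 'kenh14cdn.com/'))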

@@ -1,30 +1,32 @@
import json
import uuid

from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
smuggle_url,
traverse_obj,
try_call,
unsmuggle_url,
urljoin,
)


class LiTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:vod|promo)/[^/]+/(?:content\.do)?\?.*?\b(?:content_)?id=(?P<id>[^&]+)'

_URL_TEMPLATE = 'https://www.litv.tv/vod/%s/content.do?content_id=%s'

_VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:[^/?#]+/watch/|vod/[^/?#]+/content\.do\?content_id=)(?P<id>[\w-]+)'
_URL_TEMPLATE = 'https://www.litv.tv/%s/watch/%s'
_GEO_COUNTRIES = ['TW']
_TESTS = [{
'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
'url': 'https://www.litv.tv/drama/watch/VOD00041610',
'info_dict': {
'id': 'VOD00041606',
'title': '花千骨',
},
'playlist_count': 51, # 50 episodes + 1 trailer
}, {
'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
'url': 'https://www.litv.tv/drama/watch/VOD00041610',
'md5': 'b90ff1e9f1d8f5cfcd0a44c3e2b34c7a',
'info_dict': {
'id': 'VOD00041610',
@@ -32,16 +34,15 @@ class LiTVIE(InfoExtractor):
'title': '花千骨第1集',
'thumbnail': r're:https?://.*\.jpg$',
'description': '《花千骨》陸劇線上看。十六年前,平靜的村莊內,一名女嬰隨異相出生,途徑此地的蜀山掌門清虛道長算出此女命運非同一般,她體內散發的異香易招惹妖魔。一念慈悲下,他在村莊周邊設下結界阻擋妖魔入侵,讓其年滿十六後去蜀山,並賜名花千骨。',
'categories': ['奇幻', '愛情', '中國', '仙俠'],
'categories': ['奇幻', '愛情', '仙俠', '古裝'],
'episode': 'Episode 1',
'episode_number': 1,
},
'params': {
'noplaylist': True,
},
'skip': 'Georestricted to Taiwan',
}, {
'url': 'https://www.litv.tv/promo/miyuezhuan/?content_id=VOD00044841&',
'url': 'https://www.litv.tv/drama/watch/VOD00044841',
'md5': '88322ea132f848d6e3e18b32a832b918',
'info_dict': {
'id': 'VOD00044841',
@@ -55,94 +56,62 @@ class LiTVIE(InfoExtractor):
def _extract_playlist(self, playlist_data, content_type):
all_episodes = [
self.url_result(smuggle_url(
self._URL_TEMPLATE % (content_type, episode['contentId']),
self._URL_TEMPLATE % (content_type, episode['content_id']),
{'force_noplaylist': True})) # To prevent infinite recursion
for episode in traverse_obj(playlist_data, ('seasons', ..., 'episode', lambda _, v: v['contentId']))]
for episode in traverse_obj(playlist_data, ('seasons', ..., 'episodes', lambda _, v: v['content_id']))]

return self.playlist_result(all_episodes, playlist_data['contentId'], playlist_data.get('title'))
return self.playlist_result(all_episodes, playlist_data['content_id'], playlist_data.get('title'))

def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})

video_id = self._match_id(url)

webpage = self._download_webpage(url, video_id)
vod_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']

if self._search_regex(
r'(?i)<meta\s[^>]*http-equiv="refresh"\s[^>]*content="[0-9]+;\s*url=https://www\.litv\.tv/"',
webpage, 'meta refresh redirect', default=False, group=0):
raise ExtractorError('No such content found', expected=True)
program_info = traverse_obj(vod_data, ('programInformation', {dict})) or {}
playlist_data = traverse_obj(vod_data, ('seriesTree'))
if playlist_data and self._yes_playlist(program_info.get('series_id'), video_id, smuggled_data):
return self._extract_playlist(playlist_data, program_info.get('content_type'))

program_info = self._parse_json(self._search_regex(
r'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
video_id)
asset_id = traverse_obj(program_info, ('assets', 0, 'asset_id', {str}))
if asset_id: # This is a VOD
media_type = 'vod'
else: # This is a live stream
asset_id = program_info['content_id']
media_type = program_info['content_type']
puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value)
if puid:
endpoint = 'get-urls'
else:
puid = str(uuid.uuid4())
endpoint = 'get-urls-no-auth'
video_data = self._download_json(
f'https://www.litv.tv/api/{endpoint}', video_id,
data=json.dumps({'AssetId': asset_id, 'MediaType': media_type, 'puid': puid}).encode(),
headers={'Content-Type': 'application/json'})

# In browsers `getProgramInfo` request is always issued. Usually this
# endpoint gives the same result as the data embedded in the webpage.
# If, for some reason, there are no embedded data, we do an extra request.
if 'assetId' not in program_info:
program_info = self._download_json(
'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
query={'contentId': video_id},
headers={'Accept': 'application/json'})

series_id = program_info['seriesId']
if self._yes_playlist(series_id, video_id, smuggled_data):
playlist_data = self._download_json(
'https://www.litv.tv/vod/ajax/getSeriesTree', video_id,
query={'seriesId': series_id}, headers={'Accept': 'application/json'})
return self._extract_playlist(playlist_data, program_info['contentType'])

video_data = self._parse_json(self._search_regex(
r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);',
webpage, 'video data', default='{}'), video_id)
if not video_data:
payload = {'assetId': program_info['assetId']}
puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value)
if puid:
payload.update({
'type': 'auth',
'puid': puid,
})
endpoint = 'getUrl'
else:
payload.update({
'watchDevices': program_info['watchDevices'],
'contentType': program_info['contentType'],
})
endpoint = 'getMainUrlNoAuth'
video_data = self._download_json(
f'https://www.litv.tv/vod/ajax/{endpoint}', video_id,
data=json.dumps(payload).encode(),
headers={'Content-Type': 'application/json'})

if not video_data.get('fullpath'):
error_msg = video_data.get('errorMessage')
if error_msg == 'vod.error.outsideregionerror':
if error := traverse_obj(video_data, ('error', {dict})):
error_msg = traverse_obj(error, ('message', {str}))
if error_msg and 'OutsideRegionError' in error_msg:
self.raise_geo_restricted('This video is available in Taiwan only')
if error_msg:
elif error_msg:
raise ExtractorError(f'{self.IE_NAME} said: {error_msg}', expected=True)
raise ExtractorError(f'Unexpected result from {self.IE_NAME}')
raise ExtractorError(f'Unexpected error from {self.IE_NAME}')

formats = self._extract_m3u8_formats(
video_data['fullpath'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls')
video_data['result']['AssetURLs'][0], video_id, ext='mp4', m3u8_id='hls')
for a_format in formats:
# LiTV HLS segments doesn't like compressions
a_format.setdefault('http_headers', {})['Accept-Encoding'] = 'identity'

title = program_info['title'] + program_info.get('secondaryMark', '')
description = program_info.get('description')
thumbnail = program_info.get('imageFile')
categories = [item['name'] for item in program_info.get('category', [])]
episode = int_or_none(program_info.get('episode'))

return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'episode_number': episode,
'title': join_nonempty('title', 'secondary_mark', delim='', from_dict=program_info),
**traverse_obj(program_info, {
'description': ('description', {str}),
'thumbnail': ('picture', {urljoin('https://p-cdnstatic.svc.litv.tv/')}),
'categories': ('genres', ..., 'name', {str}),
'episode_number': ('episode', {int_or_none}),
}),
}
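
Note: the rewrite above replaces the old getUrl/getMainUrlNoAuth AJAX calls with a single get-urls API (get-urls-no-auth when no PUID cookie exists). A compact sketch of the no-auth request it sends, assuming only the field names shown in this diff (a random UUID stands in for the missing PUID; the real call may still be geo-restricted):

import json
import uuid
import urllib.request

def litv_asset_urls(asset_id, media_type='vod'):
    # Mirrors the get-urls-no-auth call in the hunk above
    body = json.dumps({'AssetId': asset_id, 'MediaType': media_type,
                       'puid': str(uuid.uuid4())}).encode()
    req = urllib.request.Request(
        'https://www.litv.tv/api/get-urls-no-auth', data=body,
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)['result']['AssetURLs']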

@@ -26,6 +26,7 @@ class MicrosoftEmbedIE(InfoExtractor):
'timestamp': 1631658316,
'upload_date': '20210914',
},
'expected_warnings': ['Failed to parse XML: syntax error: line 1, column 0'],
}]
_API_URL = 'https://prod-video-cms-rt-microsoft-com.akamaized.net/vhs/api/videos/'

@@ -36,11 +37,11 @@ class MicrosoftEmbedIE(InfoExtractor):
formats = []
for source_type, source in metadata['streams'].items():
if source_type == 'smooth_Streaming':
formats.extend(self._extract_ism_formats(source['url'], video_id, 'mss'))
formats.extend(self._extract_ism_formats(source['url'], video_id, 'mss', fatal=False))
elif source_type == 'apple_HTTP_Live_Streaming':
formats.extend(self._extract_m3u8_formats(source['url'], video_id, 'mp4'))
formats.extend(self._extract_m3u8_formats(source['url'], video_id, 'mp4', fatal=False))
elif source_type == 'mPEG_DASH':
formats.extend(self._extract_mpd_formats(source['url'], video_id))
formats.extend(self._extract_mpd_formats(source['url'], video_id, fatal=False))
else:
formats.append({
'format_id': source_type,

@@ -1,291 +0,0 @@
import functools
import json
import uuid

from .common import InfoExtractor
from ..utils import (
ExtractorError,
OnDemandPagedList,
determine_ext,
dict_get,
float_or_none,
traverse_obj,
)


class MildomBaseIE(InfoExtractor):
_GUEST_ID = None

def _call_api(self, url, video_id, query=None, note='Downloading JSON metadata', body=None):
if not self._GUEST_ID:
self._GUEST_ID = f'pc-gp-{uuid.uuid4()}'

content = self._download_json(
url, video_id, note=note, data=json.dumps(body).encode() if body else None,
headers={'Content-Type': 'application/json'} if body else {},
query={
'__guest_id': self._GUEST_ID,
'__platform': 'web',
**(query or {}),
})

if content['code'] != 0:
raise ExtractorError(
f'Mildom says: {content["message"]} (code {content["code"]})',
expected=True)
return content['body']


class MildomIE(MildomBaseIE):
IE_NAME = 'mildom'
IE_DESC = 'Record ongoing live by specific user in Mildom'
_VALID_URL = r'https?://(?:(?:www|m)\.)mildom\.com/(?P<id>\d+)'

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(f'https://www.mildom.com/{video_id}', video_id)

enterstudio = self._call_api(
'https://cloudac.mildom.com/nonolive/gappserv/live/enterstudio', video_id,
note='Downloading live metadata', query={'user_id': video_id})
result_video_id = enterstudio.get('log_id', video_id)

servers = self._call_api(
'https://cloudac.mildom.com/nonolive/gappserv/live/liveserver', result_video_id,
note='Downloading live server list', query={
'user_id': video_id,
'live_server_type': 'hls',
})

playback_token = self._call_api(
'https://cloudac.mildom.com/nonolive/gappserv/live/token', result_video_id,
note='Obtaining live playback token', body={'host_id': video_id, 'type': 'hls'})
playback_token = traverse_obj(playback_token, ('data', ..., 'token'), get_all=False)
if not playback_token:
raise ExtractorError('Failed to obtain live playback token')

formats = self._extract_m3u8_formats(
f'{servers["stream_server"]}/{video_id}_master.m3u8?{playback_token}',
result_video_id, 'mp4', headers={
'Referer': 'https://www.mildom.com/',
'Origin': 'https://www.mildom.com',
})

for fmt in formats:
fmt.setdefault('http_headers', {})['Referer'] = 'https://www.mildom.com/'

return {
'id': result_video_id,
'title': self._html_search_meta('twitter:description', webpage, default=None) or traverse_obj(enterstudio, 'anchor_intro'),
'description': traverse_obj(enterstudio, 'intro', 'live_intro', expected_type=str),
'timestamp': float_or_none(enterstudio.get('live_start_ms'), scale=1000),
'uploader': self._html_search_meta('twitter:title', webpage, default=None) or traverse_obj(enterstudio, 'loginname'),
'uploader_id': video_id,
'formats': formats,
'is_live': True,
}


class MildomVodIE(MildomBaseIE):
IE_NAME = 'mildom:vod'
IE_DESC = 'VOD in Mildom'
_VALID_URL = r'https?://(?:(?:www|m)\.)mildom\.com/playback/(?P<user_id>\d+)/(?P<id>(?P=user_id)-[a-zA-Z0-9]+-?[0-9]*)'
_TESTS = [{
'url': 'https://www.mildom.com/playback/10882672/10882672-1597662269',
'info_dict': {
'id': '10882672-1597662269',
'ext': 'mp4',
'title': '始めてのミルダム配信じゃぃ!',
'thumbnail': r're:^https?://.*\.(png|jpg)$',
'upload_date': '20200817',
'duration': 4138.37,
'description': 'ゲームをしたくて!',
'timestamp': 1597662269.0,
'uploader_id': '10882672',
'uploader': 'kson組長(けいそん)',
},
}, {
'url': 'https://www.mildom.com/playback/10882672/10882672-1597758589870-477',
'info_dict': {
'id': '10882672-1597758589870-477',
'ext': 'mp4',
'title': '【kson】感染メイズ!麻酔銃で無双する',
'thumbnail': r're:^https?://.*\.(png|jpg)$',
'timestamp': 1597759093.0,
'uploader': 'kson組長(けいそん)',
'duration': 4302.58,
'uploader_id': '10882672',
'description': 'このステージ絶対乗り越えたい',
'upload_date': '20200818',
},
}, {
'url': 'https://www.mildom.com/playback/10882672/10882672-buha9td2lrn97fk2jme0',
'info_dict': {
'id': '10882672-buha9td2lrn97fk2jme0',
'ext': 'mp4',
'title': '【kson組長】CART RACER!!!',
'thumbnail': r're:^https?://.*\.(png|jpg)$',
'uploader_id': '10882672',
'uploader': 'kson組長(けいそん)',
'upload_date': '20201104',
'timestamp': 1604494797.0,
'duration': 4657.25,
'description': 'WTF',
},
}]

def _real_extract(self, url):
user_id, video_id = self._match_valid_url(url).group('user_id', 'id')
webpage = self._download_webpage(f'https://www.mildom.com/playback/{user_id}/{video_id}', video_id)

autoplay = self._call_api(
'https://cloudac.mildom.com/nonolive/videocontent/playback/getPlaybackDetail', video_id,
note='Downloading playback metadata', query={
'v_id': video_id,
})['playback']

formats = [{
'url': autoplay['audio_url'],
'format_id': 'audio',
'protocol': 'm3u8_native',
'vcodec': 'none',
'acodec': 'aac',
'ext': 'm4a',
}]
for fmt in autoplay['video_link']:
formats.append({
'format_id': 'video-{}'.format(fmt['name']),
'url': fmt['url'],
'protocol': 'm3u8_native',
'width': fmt['level'] * autoplay['video_width'] // autoplay['video_height'],
'height': fmt['level'],
'vcodec': 'h264',
'acodec': 'aac',
'ext': 'mp4',
})

return {
'id': video_id,
'title': self._html_search_meta(('og:description', 'description'), webpage, default=None) or autoplay.get('title'),
'description': traverse_obj(autoplay, 'video_intro'),
'timestamp': float_or_none(autoplay.get('publish_time'), scale=1000),
'duration': float_or_none(autoplay.get('video_length'), scale=1000),
'thumbnail': dict_get(autoplay, ('upload_pic', 'video_pic')),
'uploader': traverse_obj(autoplay, ('author_info', 'login_name')),
'uploader_id': user_id,
'formats': formats,
}


class MildomClipIE(MildomBaseIE):
IE_NAME = 'mildom:clip'
IE_DESC = 'Clip in Mildom'
_VALID_URL = r'https?://(?:(?:www|m)\.)mildom\.com/clip/(?P<id>(?P<user_id>\d+)-[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'https://www.mildom.com/clip/10042245-63921673e7b147ebb0806d42b5ba5ce9',
'info_dict': {
'id': '10042245-63921673e7b147ebb0806d42b5ba5ce9',
'title': '全然違ったよ',
'timestamp': 1619181890,
'duration': 59,
'thumbnail': r're:https?://.+',
'uploader': 'ざきんぽ',
'uploader_id': '10042245',
},
}, {
'url': 'https://www.mildom.com/clip/10111524-ebf4036e5aa8411c99fb3a1ae0902864',
'info_dict': {
'id': '10111524-ebf4036e5aa8411c99fb3a1ae0902864',
'title': 'かっこいい',
'timestamp': 1621094003,
'duration': 59,
'thumbnail': r're:https?://.+',
'uploader': '(ルーキー',
'uploader_id': '10111524',
},
}, {
'url': 'https://www.mildom.com/clip/10660174-2c539e6e277c4aaeb4b1fbe8d22cb902',
'info_dict': {
'id': '10660174-2c539e6e277c4aaeb4b1fbe8d22cb902',
'title': 'あ',
'timestamp': 1614769431,
'duration': 31,
'thumbnail': r're:https?://.+',
'uploader': 'ドルゴルスレンギーン=ダグワドルジ',
'uploader_id': '10660174',
},
}]

def _real_extract(self, url):
user_id, video_id = self._match_valid_url(url).group('user_id', 'id')
webpage = self._download_webpage(f'https://www.mildom.com/clip/{video_id}', video_id)

clip_detail = self._call_api(
'https://cloudac-cf-jp.mildom.com/nonolive/videocontent/clip/detail', video_id,
note='Downloading playback metadata', query={
'clip_id': video_id,
})
return {
|
||||
'id': video_id,
|
||||
'title': self._html_search_meta(
|
||||
('og:description', 'description'), webpage, default=None) or clip_detail.get('title'),
|
||||
'timestamp': float_or_none(clip_detail.get('create_time')),
|
||||
'duration': float_or_none(clip_detail.get('length')),
|
||||
'thumbnail': clip_detail.get('cover'),
|
||||
'uploader': traverse_obj(clip_detail, ('user_info', 'loginname')),
|
||||
'uploader_id': user_id,
|
||||
|
||||
'url': clip_detail['url'],
|
||||
'ext': determine_ext(clip_detail.get('url'), 'mp4'),
|
||||
}
|
||||
|
||||
|
||||
class MildomUserVodIE(MildomBaseIE):
|
||||
IE_NAME = 'mildom:user:vod'
|
||||
IE_DESC = 'Download all VODs from specific user in Mildom'
|
||||
_VALID_URL = r'https?://(?:(?:www|m)\.)mildom\.com/profile/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.mildom.com/profile/10093333',
|
||||
'info_dict': {
|
||||
'id': '10093333',
|
||||
'title': 'Uploads from ねこばたけ',
|
||||
},
|
||||
'playlist_mincount': 732,
|
||||
}, {
|
||||
'url': 'https://www.mildom.com/profile/10882672',
|
||||
'info_dict': {
|
||||
'id': '10882672',
|
||||
'title': 'Uploads from kson組長(けいそん)',
|
||||
},
|
||||
'playlist_mincount': 201,
|
||||
}]
|
||||
|
||||
def _fetch_page(self, user_id, page):
|
||||
page += 1
|
||||
reply = self._call_api(
|
||||
'https://cloudac.mildom.com/nonolive/videocontent/profile/playbackList',
|
||||
user_id, note=f'Downloading page {page}', query={
|
||||
'user_id': user_id,
|
||||
'page': page,
|
||||
'limit': '30',
|
||||
})
|
||||
if not reply:
|
||||
return
|
||||
for x in reply:
|
||||
v_id = x.get('v_id')
|
||||
if not v_id:
|
||||
continue
|
||||
yield self.url_result(f'https://www.mildom.com/playback/{user_id}/{v_id}')
|
||||
|
||||
def _real_extract(self, url):
|
||||
user_id = self._match_id(url)
|
||||
self.to_screen(f'This will download all VODs belonging to user. To download ongoing live video, use "https://www.mildom.com/{user_id}" instead')
|
||||
|
||||
profile = self._call_api(
|
||||
'https://cloudac.mildom.com/nonolive/gappserv/user/profileV2', user_id,
|
||||
query={'user_id': user_id}, note='Downloading user profile')['user_info']
|
||||
|
||||
return self.playlist_result(
|
||||
OnDemandPagedList(functools.partial(self._fetch_page, user_id), 30),
|
||||
user_id, f'Uploads from {profile["loginname"]}')
|
||||
@@ -80,9 +80,9 @@ class MiTeleIE(TelecincoBaseIE):
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        pre_player = self._parse_json(self._search_regex(
            r'window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=\s*({.+})',
            webpage, 'Pre Player'), display_id)['prePlayer']
        pre_player = self._search_json(
            r'window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=',
            webpage, 'Pre Player', display_id)['prePlayer']
        title = pre_player['title']
        video_info = self._parse_content(pre_player['video'], url)
        content = pre_player.get('content') or {}

@@ -12,7 +12,7 @@ from ..utils.traversal import traverse_obj

class MixchIE(InfoExtractor):
    IE_NAME = 'mixch'
    _VALID_URL = r'https?://(?:www\.)?mixch\.tv/u/(?P<id>\d+)'
    _VALID_URL = r'https?://mixch\.tv/u/(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://mixch.tv/u/16943797/live',

@@ -74,7 +74,7 @@ class MixchIE(InfoExtractor):

class MixchArchiveIE(InfoExtractor):
    IE_NAME = 'mixch:archive'
    _VALID_URL = r'https?://(?:www\.)?mixch\.tv/archive/(?P<id>\d+)'
    _VALID_URL = r'https?://mixch\.tv/archive/(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://mixch.tv/archive/421',

@@ -116,3 +116,56 @@ class MixchArchiveIE(InfoExtractor):
            'formats': self._extract_m3u8_formats(info_json['archiveURL'], video_id),
            'thumbnail': traverse_obj(info_json, ('thumbnailURL', {url_or_none})),
        }


class MixchMovieIE(InfoExtractor):
    IE_NAME = 'mixch:movie'
    _VALID_URL = r'https?://mixch\.tv/m/(?P<id>\w+)'

    _TESTS = [{
        'url': 'https://mixch.tv/m/Ve8KNkJ5',
        'info_dict': {
            'id': 'Ve8KNkJ5',
            'title': '夏☀️\nムービーへのポイントは本イベントに加算されないので配信にてお願い致します🙇🏻\u200d♀️\n#TGCCAMPUS #ミス東大 #ミス東大2024 ',
            'ext': 'mp4',
            'uploader': 'ミス東大No.5 松藤百香🍑💫',
            'uploader_id': '12299174',
            'channel_follower_count': int,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'timestamp': 1724070828,
            'uploader_url': 'https://mixch.tv/u/12299174',
            'live_status': 'not_live',
            'upload_date': '20240819',
        },
    }, {
        'url': 'https://mixch.tv/m/61DzpIKE',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        data = self._download_json(
            f'https://mixch.tv/api-web/movies/{video_id}', video_id)
        return {
            'id': video_id,
            'formats': [{
                'format_id': 'mp4',
                'url': data['movie']['file'],
                'ext': 'mp4',
            }],
            **traverse_obj(data, {
                'title': ('movie', 'title', {str}),
                'thumbnail': ('movie', 'thumbnailURL', {url_or_none}),
                'uploader': ('ownerInfo', 'name', {str}),
                'uploader_id': ('ownerInfo', 'id', {int}, {str_or_none}),
                'channel_follower_count': ('ownerInfo', 'fan', {int_or_none}),
                'view_count': ('ownerInfo', 'view', {int_or_none}),
                'like_count': ('movie', 'favCount', {int_or_none}),
                'comment_count': ('movie', 'commentCount', {int_or_none}),
                'timestamp': ('movie', 'published', {int_or_none}),
                'uploader_url': ('ownerInfo', 'id', {lambda x: x and f'https://mixch.tv/u/{x}'}, filter),
            }),
            'live_status': 'not_live',
        }

@@ -16,10 +16,10 @@ from ..utils import (
    parse_iso8601,
    smuggle_url,
    str_or_none,
    traverse_obj,
    url_or_none,
    urljoin,
)
from ..utils.traversal import traverse_obj, value


class PatreonBaseIE(InfoExtractor):

@@ -252,6 +252,27 @@ class PatreonIE(PatreonBaseIE):
            'thumbnail': r're:^https?://.+',
        },
        'skip': 'Patron-only content',
    }, {
        # Contains a comment reply in the 'included' section
        'url': 'https://www.patreon.com/posts/114721679',
        'info_dict': {
            'id': '114721679',
            'ext': 'mp4',
            'upload_date': '20241025',
            'uploader': 'Japanalysis',
            'like_count': int,
            'thumbnail': r're:^https?://.+',
            'comment_count': int,
            'title': 'Karasawa Part 2',
            'description': 'Part 2 of this video https://www.youtube.com/watch?v=Azms2-VTASk',
            'uploader_url': 'https://www.patreon.com/japanalysis',
            'uploader_id': '80504268',
            'channel_url': 'https://www.patreon.com/japanalysis',
            'channel_follower_count': int,
            'timestamp': 1729897015,
            'channel_id': '9346307',
        },
        'params': {'getcomments': True},
    }]
    _RETURN_TYPE = 'video'

@@ -404,26 +425,24 @@ class PatreonIE(PatreonBaseIE):
                f'posts/{post_id}/comments', post_id, query=params, note=f'Downloading comments page {page}')

            cursor = None
            for comment in traverse_obj(response, (('data', ('included', lambda _, v: v['type'] == 'comment')), ...)):
            for comment in traverse_obj(response, (('data', 'included'), lambda _, v: v['type'] == 'comment' and v['id'])):
                count += 1
                comment_id = comment.get('id')
                attributes = comment.get('attributes') or {}
                if comment_id is None:
                    continue
                author_id = traverse_obj(comment, ('relationships', 'commenter', 'data', 'id'))
                author_info = traverse_obj(
                    response, ('included', lambda _, v: v['id'] == author_id and v['type'] == 'user', 'attributes'),
                    get_all=False, expected_type=dict, default={})

                yield {
                    'id': comment_id,
                    'text': attributes.get('body'),
                    'timestamp': parse_iso8601(attributes.get('created')),
                    'parent': traverse_obj(comment, ('relationships', 'parent', 'data', 'id'), default='root'),
                    'author_is_uploader': attributes.get('is_by_creator'),
                    **traverse_obj(comment, {
                        'id': ('id', {str_or_none}),
                        'text': ('attributes', 'body', {str}),
                        'timestamp': ('attributes', 'created', {parse_iso8601}),
                        'parent': ('relationships', 'parent', 'data', ('id', {value('root')}), {str}, any),
                        'author_is_uploader': ('attributes', 'is_by_creator', {bool}),
                    }),
                    **traverse_obj(response, (
                        'included', lambda _, v: v['id'] == author_id and v['type'] == 'user', 'attributes', {
                            'author': ('full_name', {str}),
                            'author_thumbnail': ('image_url', {url_or_none}),
                        }), get_all=False),
                    'author_id': author_id,
                    'author': author_info.get('full_name'),
                    'author_thumbnail': author_info.get('image_url'),
                }

            if count < traverse_obj(response, ('meta', 'count')):

122 yt_dlp/extractor/pialive.py Normal file
@@ -0,0 +1,122 @@
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    extract_attributes,
    get_element_by_class,
    get_element_html_by_class,
    multipart_encode,
    str_or_none,
    unified_timestamp,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class PiaLiveIE(InfoExtractor):
    _VALID_URL = r'https?://player\.pia-live\.jp/stream/(?P<id>[\w-]+)'
    _PLAYER_ROOT_URL = 'https://player.pia-live.jp/'
    _PIA_LIVE_API_URL = 'https://api.pia-live.jp'
    _API_KEY = 'kfds)FKFps-dms9e'
    _TESTS = [{
        'url': 'https://player.pia-live.jp/stream/4JagFBEIM14s_hK9aXHKf3k3F3bY5eoHFQxu68TC6krUDqGOwN4d61dCWQYOd6CTxl4hjya9dsfEZGsM4uGOUdax60lEI4twsXGXf7crmz8Gk__GhupTrWxA7RFRVt76',
        'info_dict': {
            'id': '88f3109a-f503-4d0f-a9f7-9f39ac745d84',
            'display_id': '2431867_001',
            'title': 'こながめでたい日2024の視聴ページ | PIA LIVE STREAM(ぴあライブストリーム)',
            'live_status': 'was_live',
            'comment_count': int,
        },
        'params': {
            'getcomments': True,
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'skip': 'The video is no longer available',
    }, {
        'url': 'https://player.pia-live.jp/stream/4JagFBEIM14s_hK9aXHKf3k3F3bY5eoHFQxu68TC6krJdu0GVBVbVy01IwpJ6J3qBEm3d9TCTt1d0eWpsZGj7DrOjVOmS7GAWGwyscMgiThopJvzgWC4H5b-7XQjAfRZ',
        'info_dict': {
            'id': '9ce8b8ba-f6d1-4d1f-83a0-18c3148ded93',
            'display_id': '2431867_002',
            'title': 'こながめでたい日2024の視聴ページ | PIA LIVE STREAM(ぴあライブストリーム)',
            'live_status': 'was_live',
            'comment_count': int,
        },
        'params': {
            'getcomments': True,
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
        'skip': 'The video is no longer available',
    }]

    def _extract_var(self, variable, html):
        return self._search_regex(
            rf'(?:var|const|let)\s+{variable}\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
            html, f'variable {variable}', group='value')

    def _real_extract(self, url):
        video_key = self._match_id(url)
        webpage = self._download_webpage(url, video_key)

        program_code = self._extract_var('programCode', webpage)
        article_code = self._extract_var('articleCode', webpage)
        title = self._html_extract_title(webpage)

        if get_element_html_by_class('play-end', webpage):
            raise ExtractorError('The video is no longer available', expected=True, video_id=program_code)

        if start_info := clean_html(get_element_by_class('play-waiting__date', webpage)):
            date, time = self._search_regex(
                r'(?P<date>\d{4}/\d{1,2}/\d{1,2})\([月火水木金土日]\)(?P<time>\d{2}:\d{2})',
                start_info, 'start_info', fatal=False, group=('date', 'time'))
            if date and time:
                release_timestamp_str = f'{date} {time} +09:00'
                release_timestamp = unified_timestamp(release_timestamp_str)
                self.raise_no_formats(f'The video will be available after {release_timestamp_str}', expected=True)
                return {
                    'id': program_code,
                    'title': title,
                    'live_status': 'is_upcoming',
                    'release_timestamp': release_timestamp,
                }

        payload, content_type = multipart_encode({
            'play_url': video_key,
            'api_key': self._API_KEY,
        })
        api_data_and_headers = {
            'data': payload,
            'headers': {'Content-Type': content_type, 'Referer': self._PLAYER_ROOT_URL},
        }

        player_tag_list = self._download_json(
            f'{self._PIA_LIVE_API_URL}/perf/player-tag-list/{program_code}', program_code,
            'Fetching player tag list', 'Unable to fetch player tag list', **api_data_and_headers)

        return self.url_result(
            extract_attributes(player_tag_list['data']['movie_one_tag'])['src'],
            url_transparent=True, title=title, display_id=program_code,
            __post_extractor=self.extract_comments(program_code, article_code, api_data_and_headers))

    def _get_comments(self, program_code, article_code, api_data_and_headers):
        chat_room_url = traverse_obj(self._download_json(
            f'{self._PIA_LIVE_API_URL}/perf/chat-tag-list/{program_code}/{article_code}', program_code,
            'Fetching chat info', 'Unable to fetch chat info', fatal=False, **api_data_and_headers),
            ('data', 'chat_one_tag', {extract_attributes}, 'src', {url_or_none}))
        if not chat_room_url:
            return
        comment_page = self._download_webpage(
            chat_room_url, program_code, 'Fetching comment page', 'Unable to fetch comment page',
            fatal=False, headers={'Referer': self._PLAYER_ROOT_URL})
        if not comment_page:
            return
        yield from traverse_obj(self._search_json(
            r'var\s+_history\s*=', comment_page, 'comment list',
            program_code, contains_pattern=r'\[(?s:.+)\]', fatal=False), (..., {
                'timestamp': (0, {int}),
                'author_is_uploader': (1, {lambda x: x == 2}),
                'author': (2, {str}),
                'text': (3, {str}),
                'id': (4, {str_or_none}),
            }))

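A minimal usage sketch, not part of this commit: the new PiaLiveIE above resolves the player page to an ULIZA embed and exposes chat messages as comments, so it can be driven through the regular yt_dlp Python API. The URL is the first test URL from the diff (its tests are skipped as no longer available), so this is illustrative only.

    import yt_dlp

    # 'getcomments' mirrors the params used in the extractor's own tests above
    opts = {'skip_download': True, 'getcomments': True}
    with yt_dlp.YoutubeDL(opts) as ydl:
        info = ydl.extract_info(
            'https://player.pia-live.jp/stream/4JagFBEIM14s_hK9aXHKf3k3F3bY5eoHFQxu68TC6krUDqGOwN4d61dCWQYOd6CTxl4hjya9dsfEZGsM4uGOUdax60lEI4twsXGXf7crmz8Gk__GhupTrWxA7RFRVt76',
            download=False)
        print(info.get('live_status'), len(info.get('comments') or []))
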
@@ -1,70 +0,0 @@
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_qs,
    time_seconds,
    traverse_obj,
)


class PIAULIZAPortalIE(InfoExtractor):
    IE_DESC = 'ulizaportal.jp - PIA LIVE STREAM'
    _VALID_URL = r'https?://(?:www\.)?ulizaportal\.jp/pages/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
    _TESTS = [{
        'url': 'https://ulizaportal.jp/pages/005f18b7-e810-5618-cb82-0987c5755d44',
        'info_dict': {
            'id': '005f18b7-e810-5618-cb82-0987c5755d44',
            'title': 'プレゼンテーションプレイヤーのサンプル',
            'live_status': 'not_live',
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
    }, {
        'url': 'https://ulizaportal.jp/pages/005e1b23-fe93-5780-19a0-98e917cc4b7d?expires=4102412400&signature=f422a993b683e1068f946caf406d211c17d1ef17da8bef3df4a519502155aa91&version=1',
        'info_dict': {
            'id': '005e1b23-fe93-5780-19a0-98e917cc4b7d',
            'title': '【確認用】視聴サンプルページ(ULIZA)',
            'live_status': 'not_live',
        },
        'params': {
            'skip_download': True,
            'ignore_no_formats_error': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        expires = int_or_none(traverse_obj(parse_qs(url), ('expires', 0)))
        if expires and expires <= time_seconds():
            raise ExtractorError('The link is expired.', video_id=video_id, expected=True)

        webpage = self._download_webpage(url, video_id)

        player_data = self._download_webpage(
            self._search_regex(
                r'<script [^>]*\bsrc="(https://player-api\.p\.uliza\.jp/v1/players/[^"]+)"',
                webpage, 'player data url'),
            video_id, headers={'Referer': 'https://ulizaportal.jp/'},
            note='Fetching player data', errnote='Unable to fetch player data')

        formats = self._extract_m3u8_formats(
            self._search_regex(
                r'["\'](https://vms-api\.p\.uliza\.jp/v1/prog-index\.m3u8[^"\']+)', player_data,
                'm3u8 url', default=None),
            video_id, fatal=False)
        m3u8_type = self._search_regex(
            r'/hls/(dvr|video)/', traverse_obj(formats, (0, 'url')), 'm3u8 type', default=None)

        return {
            'id': video_id,
            'title': self._html_extract_title(webpage),
            'formats': formats,
            'live_status': {
                'video': 'is_live',
                'dvr': 'was_live',  # short-term archives
            }.get(m3u8_type, 'not_live'),  # VOD or long-term archives
        }

@@ -1,4 +1,5 @@
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    traverse_obj,

@@ -110,8 +111,8 @@ class PixivSketchUserIE(PixivSketchBaseIE):
        if not traverse_obj(data, 'is_broadcasting'):
            try:
                self._call_api(user_id, 'users/current.json', url, 'Investigating reason for request failure')
            except ExtractorError as ex:
                if ex.cause and ex.cause.code == 401:
            except ExtractorError as e:
                if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                    self.raise_login_required(f'Please log in, or use direct link like https://sketch.pixiv.net/@{user_id}/1234567890', method='cookies')
            raise ExtractorError('This user is offline', expected=True)

@@ -1,136 +0,0 @@
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    extract_attributes,
    int_or_none,
    js_to_json,
    merge_dicts,
)


class PokemonIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pokemon\.com/[a-z]{2}(?:.*?play=(?P<id>[a-z0-9]{32})|/(?:[^/]+/)+(?P<display_id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.pokemon.com/us/pokemon-episodes/20_30-the-ol-raise-and-switch/',
        'md5': '2fe8eaec69768b25ef898cda9c43062e',
        'info_dict': {
            'id': 'afe22e30f01c41f49d4f1d9eab5cd9a4',
            'ext': 'mp4',
            'title': 'The Ol’ Raise and Switch!',
            'description': 'md5:7db77f7107f98ba88401d3adc80ff7af',
        },
        'add_id': ['LimelightMedia'],
    }, {
        # no data-video-title
        'url': 'https://www.pokemon.com/fr/episodes-pokemon/films-pokemon/pokemon-lascension-de-darkrai-2008',
        'info_dict': {
            'id': 'dfbaf830d7e54e179837c50c0c6cc0e1',
            'ext': 'mp4',
            'title': "Pokémon : L'ascension de Darkrai",
            'description': 'md5:d1dbc9e206070c3e14a06ff557659fb5',
        },
        'add_id': ['LimelightMedia'],
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pokemon.com/uk/pokemon-episodes/?play=2e8b5c761f1d4a9286165d7748c1ece2',
        'only_matching': True,
    }, {
        'url': 'http://www.pokemon.com/fr/episodes-pokemon/18_09-un-hiver-inattendu/',
        'only_matching': True,
    }, {
        'url': 'http://www.pokemon.com/de/pokemon-folgen/01_20-bye-bye-smettbo/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id, display_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, video_id or display_id)
        video_data = extract_attributes(self._search_regex(
            r'(<[^>]+data-video-id="{}"[^>]*>)'.format(video_id if video_id else '[a-z0-9]{32}'),
            webpage, 'video data element'))
        video_id = video_data['data-video-id']
        title = video_data.get('data-video-title') or self._html_search_meta(
            'pkm-title', webpage, ' title', default=None) or self._search_regex(
            r'<h1[^>]+\bclass=["\']us-title[^>]+>([^<]+)', webpage, 'title')
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': f'limelight:media:{video_id}',
            'title': title,
            'description': video_data.get('data-video-summary'),
            'thumbnail': video_data.get('data-video-poster'),
            'series': 'Pokémon',
            'season_number': int_or_none(video_data.get('data-video-season')),
            'episode': title,
            'episode_number': int_or_none(video_data.get('data-video-episode')),
            'ie_key': 'LimelightMedia',
        }


class PokemonWatchIE(InfoExtractor):
    _VALID_URL = r'https?://watch\.pokemon\.com/[a-z]{2}-[a-z]{2}/(?:#/)?player(?:\.html)?\?id=(?P<id>[a-z0-9]{32})'
    _API_URL = 'https://www.pokemon.com/api/pokemontv/v2/channels/{0:}'
    _TESTS = [{
        'url': 'https://watch.pokemon.com/en-us/player.html?id=8309a40969894a8e8d5bc1311e9c5667',
        'md5': '62833938a31e61ab49ada92f524c42ff',
        'info_dict': {
            'id': '8309a40969894a8e8d5bc1311e9c5667',
            'ext': 'mp4',
            'title': 'Lillier and the Staff!',
            'description': 'md5:338841b8c21b283d24bdc9b568849f04',
        },
    }, {
        'url': 'https://watch.pokemon.com/en-us/#/player?id=3fe7752ba09141f0b0f7756d1981c6b2',
        'only_matching': True,
    }, {
        'url': 'https://watch.pokemon.com/de-de/player.html?id=b3c402e111a4459eb47e12160ab0ba07',
        'only_matching': True,
    }]

    def _extract_media(self, channel_array, video_id):
        for channel in channel_array:
            for media in channel.get('media'):
                if media.get('id') == video_id:
                    return media
        return None

    def _real_extract(self, url):
        video_id = self._match_id(url)

        info = {
            '_type': 'url',
            'id': video_id,
            'url': f'limelight:media:{video_id}',
            'ie_key': 'LimelightMedia',
        }

        # API call can be avoided entirely if we are listing formats
        if self.get_param('listformats', False):
            return info

        webpage = self._download_webpage(url, video_id)
        build_vars = self._parse_json(self._search_regex(
            r'(?s)buildVars\s*=\s*({.*?})', webpage, 'build vars'),
            video_id, transform_source=js_to_json)
        region = build_vars.get('region')
        channel_array = self._download_json(self._API_URL.format(region), video_id)
        video_data = self._extract_media(channel_array, video_id)

        if video_data is None:
            raise ExtractorError(
                f'Video {video_id} does not exist', expected=True)

        info['_type'] = 'url_transparent'
        images = video_data.get('images')

        return merge_dicts(info, {
            'title': video_data.get('title'),
            'description': video_data.get('description'),
            'thumbnail': images.get('medium') or images.get('small'),
            'series': 'Pokémon',
            'season_number': int_or_none(video_data.get('season')),
            'episode': video_data.get('title'),
            'episode_number': int_or_none(video_data.get('episode')),
        })

105 yt_dlp/extractor/radioradicale.py Normal file
@@ -0,0 +1,105 @@
from .common import InfoExtractor
from ..utils import url_or_none
from ..utils.traversal import traverse_obj


class RadioRadicaleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?radioradicale\.it/scheda/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.radioradicale.it/scheda/471591',
        'md5': 'eb0fbe43a601f1a361cbd00f3c45af4a',
        'info_dict': {
            'id': '471591',
            'ext': 'mp4',
            'title': 'md5:e8fbb8de57011a3255db0beca69af73d',
            'description': 'md5:5e15a789a2fe4d67da8d1366996e89ef',
            'location': 'Napoli',
            'duration': 2852.0,
            'timestamp': 1459987200,
            'upload_date': '20160407',
            'thumbnail': 'https://www.radioradicale.it/photo400/0/0/9/0/1/00901768.jpg',
        },
    }, {
        'url': 'https://www.radioradicale.it/scheda/742783/parlamento-riunito-in-seduta-comune-11a-della-xix-legislatura',
        'info_dict': {
            'id': '742783',
            'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            'description': '-) Votazione per l\'elezione di un giudice della Corte Costituzionale (nono scrutinio)',
            'location': 'CAMERA',
            'duration': 5868.0,
            'timestamp': 1730246400,
            'upload_date': '20241030',
        },
        'playlist': [{
            'md5': 'aa48de55dcc45478e4cd200f299aab7d',
            'info_dict': {
                'id': '742783-0',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }, {
            'md5': 'be915c189c70ad2920e5810f32260ff5',
            'info_dict': {
                'id': '742783-1',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }, {
            'md5': 'f0ee4047342baf8ed3128a8417ac5e0a',
            'info_dict': {
                'id': '742783-2',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }],
    }]

    def _entries(self, videos_info, page_id):
        for idx, video in enumerate(traverse_obj(
                videos_info, ('playlist', lambda _, v: v['sources']))):
            video_id = f'{page_id}-{idx}'
            formats = []
            subtitles = {}

            for m3u8_url in traverse_obj(video, ('sources', ..., 'src', {url_or_none})):
                fmts, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            for sub in traverse_obj(video, ('subtitles', ..., lambda _, v: url_or_none(v['src']))):
                self._merge_subtitles({sub.get('srclang') or 'und': [{
                    'url': sub['src'],
                    'name': sub.get('label'),
                }]}, target=subtitles)

            yield {
                'id': video_id,
                'title': video.get('title'),
                'formats': formats,
                'subtitles': subtitles,
            }

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        videos_info = self._search_json(
            r'jQuery\.extend\(Drupal\.settings\s*,',
            webpage, 'videos_info', page_id)['RRscheda']

        entries = list(self._entries(videos_info, page_id))

        common_info = {
            'id': page_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'location': videos_info.get('luogo'),
            **self._search_json_ld(webpage, page_id),
        }

        if len(entries) == 1:
            return {
                **entries[0],
                **common_info,
            }

        return self.playlist_result(entries, multi_video=True, **common_info)

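A minimal sketch, not part of this commit, of how the new RadioRadicaleIE behaves from the Python API: single-source pages return one video info dict, while multi-source pages such as the second test URL come back as a multi_video playlist whose entries are numbered {page_id}-0, {page_id}-1, and so on.

    import yt_dlp

    with yt_dlp.YoutubeDL({'skip_download': True}) as ydl:
        info = ydl.extract_info('https://www.radioradicale.it/scheda/471591', download=False)
        # a single-video page: one info dict with formats and JSON-LD metadata merged in
        print(info['id'], info.get('title'), info.get('location'))
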
@@ -259,6 +259,8 @@ class RedditIE(InfoExtractor):
                f'https://www.reddit.com/{slug}/.json', video_id, expected_status=403)
        except ExtractorError as e:
            if isinstance(e.cause, json.JSONDecodeError):
                if self._get_cookies('https://www.reddit.com/').get('reddit_session'):
                    raise ExtractorError('Your IP address is unable to access the Reddit API', expected=True)
                self.raise_login_required('Account authentication is required')
            raise

@@ -213,7 +213,7 @@ class RedGifsSearchIE(RedGifsBaseInfoExtractor):

class RedGifsUserIE(RedGifsBaseInfoExtractor):
    IE_DESC = 'Redgifs user'
    _VALID_URL = r'https?://(?:www\.)?redgifs\.com/users/(?P<username>[^/?#]+)(?:\?(?P<query>[^#]+))?'
    _PAGE_SIZE = 30
    _PAGE_SIZE = 80
    _TESTS = [
        {
            'url': 'https://www.redgifs.com/users/lamsinka89',

@@ -222,7 +222,7 @@ class RedGifsUserIE(RedGifsBaseInfoExtractor):
                'title': 'lamsinka89',
                'description': 'RedGifs user lamsinka89, ordered by recent',
            },
            'playlist_mincount': 100,
            'playlist_mincount': 391,
        },
        {
            'url': 'https://www.redgifs.com/users/lamsinka89?page=3',

@@ -231,7 +231,7 @@ class RedGifsUserIE(RedGifsBaseInfoExtractor):
                'title': 'lamsinka89',
                'description': 'RedGifs user lamsinka89, ordered by recent',
            },
            'playlist_count': 30,
            'playlist_count': 80,
        },
        {
            'url': 'https://www.redgifs.com/users/lamsinka89?order=best&type=g',

@@ -240,7 +240,17 @@ class RedGifsUserIE(RedGifsBaseInfoExtractor):
                'title': 'lamsinka89',
                'description': 'RedGifs user lamsinka89, ordered by best',
            },
            'playlist_mincount': 100,
            'playlist_mincount': 391,
        },
        {
            'url': 'https://www.redgifs.com/users/ignored52',
            'note': 'https://github.com/yt-dlp/yt-dlp/issues/7382',
            'info_dict': {
                'id': 'ignored52',
                'title': 'ignored52',
                'description': 'RedGifs user ignored52, ordered by recent',
            },
            'playlist_mincount': 121,
        },
    ]

@@ -2,15 +2,21 @@ import itertools

from .common import InfoExtractor
from ..utils import (
    UnsupportedError,
    bool_or_none,
    determine_ext,
    int_or_none,
    js_to_json,
    parse_qs,
    traverse_obj,
    str_or_none,
    try_get,
    unified_timestamp,
    url_or_none,
)
from ..utils.traversal import (
    subs_list_to_dict,
    traverse_obj,
)


class RutubeBaseIE(InfoExtractor):

@@ -19,7 +25,7 @@ class RutubeBaseIE(InfoExtractor):
            query = {}
        query['format'] = 'json'
        return self._download_json(
            f'http://rutube.ru/api/video/{video_id}/',
            f'https://rutube.ru/api/video/{video_id}/',
            video_id, 'Downloading video JSON',
            'Unable to download video JSON', query=query)

@@ -61,18 +67,21 @@ class RutubeBaseIE(InfoExtractor):
            query = {}
        query['format'] = 'json'
        return self._download_json(
            f'http://rutube.ru/api/play/options/{video_id}/',
            f'https://rutube.ru/api/play/options/{video_id}/',
            video_id, 'Downloading options JSON',
            'Unable to download options JSON',
            headers=self.geo_verification_headers(), query=query)

    def _extract_formats(self, options, video_id):
    def _extract_formats_and_subtitles(self, options, video_id):
        formats = []
        subtitles = {}
        for format_id, format_url in options['video_balancer'].items():
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_id, fatal=False))

@@ -82,11 +91,19 @@ class RutubeBaseIE(InfoExtractor):
                    'format_id': format_id,
                })
        for hls_url in traverse_obj(options, ('live_streams', 'hls', ..., 'url', {url_or_none})):
            formats.extend(self._extract_m3u8_formats(hls_url, video_id, ext='mp4', fatal=False))
        return formats
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                hls_url, video_id, 'mp4', fatal=False, m3u8_id='hls')
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        self._merge_subtitles(traverse_obj(options, ('captions', ..., {
            'id': 'code',
            'url': 'file',
            'name': ('langTitle', {str}),
        }, all, {subs_list_to_dict(lang='ru')})), target=subtitles)
        return formats, subtitles

    def _download_and_extract_formats(self, video_id, query=None):
        return self._extract_formats(
    def _download_and_extract_formats_and_subtitles(self, video_id, query=None):
        return self._extract_formats_and_subtitles(
            self._download_api_options(video_id, query=query), video_id)

@@ -97,8 +114,8 @@ class RutubeIE(RutubeBaseIE):
    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/(?:play/)?embed/[\da-z]{32}.*?)\1']

    _TESTS = [{
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'md5': 'e33ac625efca66aba86cbec9851f2692',
        'url': 'https://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'md5': '3d73fdfe5bb81b9aef139e22ef3de26a',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'mp4',

@@ -111,26 +128,25 @@ class RutubeIE(RutubeBaseIE):
            'upload_date': '20131016',
            'age_limit': 0,
            'view_count': int,
            'thumbnail': 'http://pic.rutubelist.ru/video/d2/a0/d2a0aec998494a396deafc7ba2c82add.jpg',
            'thumbnail': 'https://pic.rutubelist.ru/video/d2/a0/d2a0aec998494a396deafc7ba2c82add.jpg',
            'categories': ['Новости и СМИ'],
            'chapters': [],
        },
        'expected_warnings': ['Unable to download f4m'],
    }, {
        'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'url': 'https://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
        'url': 'https://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252',
        'url': 'https://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/private/884fb55f07a97ab673c7d654553e0f48/?p=x2QojCumHTS3rsKHWXN8Lg',
        'md5': 'd106225f15d625538fe22971158e896f',
        'md5': '4fce7b4fcc7b1bcaa3f45eb1e1ad0dd7',
        'info_dict': {
            'id': '884fb55f07a97ab673c7d654553e0f48',
            'ext': 'mp4',

@@ -143,11 +159,10 @@ class RutubeIE(RutubeBaseIE):
            'upload_date': '20221210',
            'age_limit': 0,
            'view_count': int,
            'thumbnail': 'http://pic.rutubelist.ru/video/f2/d4/f2d42b54be0a6e69c1c22539e3152156.jpg',
            'thumbnail': 'https://pic.rutubelist.ru/video/f2/d4/f2d42b54be0a6e69c1c22539e3152156.jpg',
            'categories': ['Видеоигры'],
            'chapters': [],
        },
        'expected_warnings': ['Unable to download f4m'],
    }, {
        'url': 'https://rutube.ru/video/c65b465ad0c98c89f3b25cb03dcc87c6/',
        'info_dict': {

@@ -156,17 +171,16 @@ class RutubeIE(RutubeBaseIE):
            'chapters': 'count:4',
            'categories': ['Бизнес и предпринимательство'],
            'description': 'md5:252feac1305257d8c1bab215cedde75d',
            'thumbnail': 'http://pic.rutubelist.ru/video/71/8f/718f27425ea9706073eb80883dd3787b.png',
            'thumbnail': 'https://pic.rutubelist.ru/video/71/8f/718f27425ea9706073eb80883dd3787b.png',
            'duration': 782,
            'age_limit': 0,
            'uploader_id': '23491359',
            'timestamp': 1677153329,
            'view_count': int,
            'upload_date': '20230223',
            'title': 'Бизнес с нуля: найм сотрудников. Интервью с директором строительной компании',
            'title': 'Бизнес с нуля: найм сотрудников. Интервью с директором строительной компании #1',
            'uploader': 'Стас Быков',
        },
        'expected_warnings': ['Unable to download f4m'],
    }, {
        'url': 'https://rutube.ru/live/video/c58f502c7bb34a8fcdd976b221fca292/',
        'info_dict': {

@@ -174,7 +188,7 @@ class RutubeIE(RutubeBaseIE):
            'ext': 'mp4',
            'categories': ['Телепередачи'],
            'description': '',
            'thumbnail': 'http://pic.rutubelist.ru/video/14/19/14190807c0c48b40361aca93ad0867c7.jpg',
            'thumbnail': 'https://pic.rutubelist.ru/video/14/19/14190807c0c48b40361aca93ad0867c7.jpg',
            'live_status': 'is_live',
            'age_limit': 0,
            'uploader_id': '23460655',

@@ -184,6 +198,24 @@ class RutubeIE(RutubeBaseIE):
            'title': r're:Первый канал. Прямой эфир \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'uploader': 'Первый канал',
        },
    }, {
        'url': 'https://rutube.ru/play/embed/03a9cb54bac3376af4c5cb0f18444e01/',
        'info_dict': {
            'id': '03a9cb54bac3376af4c5cb0f18444e01',
            'ext': 'mp4',
            'age_limit': 0,
            'description': '',
            'title': 'Церемония начала торгов акциями ПАО «ЕвроТранс»',
            'chapters': [],
            'upload_date': '20240829',
            'duration': 293,
            'uploader': 'MOEX - Московская биржа',
            'timestamp': 1724946628,
            'thumbnail': 'https://pic.rutubelist.ru/video/2e/24/2e241fddb459baf0fa54acfca44874f4.jpg',
            'view_count': int,
            'uploader_id': '38420507',
            'categories': ['Интервью'],
        },
    }, {
        'url': 'https://rutube.ru/video/5ab908fccfac5bb43ef2b1e4182256b0/',
        'only_matching': True,

@@ -192,40 +224,46 @@ class RutubeIE(RutubeBaseIE):
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if RutubePlaylistIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        query = parse_qs(url)
        info = self._download_and_extract_info(video_id, query)
        info['formats'] = self._download_and_extract_formats(video_id, query)
        return info
        formats, subtitles = self._download_and_extract_formats_and_subtitles(video_id, query)
        return {
            **info,
            'formats': formats,
            'subtitles': subtitles,
        }


class RutubeEmbedIE(RutubeBaseIE):
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)(?:[?#/]|$)'

    _TESTS = [{
        'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'url': 'https://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'mp4',
            'timestamp': 1387830582,
            'upload_date': '20131223',
            'uploader_id': '297833',
            'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
            'age_limit': 0,
            'duration': 1395,
            'chapters': [],
            'description': 'md5:a5acea57bbc3ccdc3cacd1f11a014b5b',
            'view_count': int,
            'thumbnail': 'https://pic.rutubelist.ru/video/d3/03/d3031f4670a6e6170d88fb3607948418.jpg',
            'categories': ['Сериалы'],
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/8083783',
        'url': 'https://rutube.ru/play/embed/8083783',
        'only_matching': True,
    }, {
        # private video

@@ -240,11 +278,12 @@ class RutubeEmbedIE(RutubeBaseIE):
        query = parse_qs(url)
        options = self._download_api_options(embed_id, query)
        video_id = options['effective_video']
        formats = self._extract_formats(options, video_id)
        formats, subtitles = self._extract_formats_and_subtitles(options, video_id)
        info = self._download_and_extract_info(video_id, query)
        info.update({
            'extractor_key': 'Rutube',
            'formats': formats,
            'subtitles': subtitles,
        })
        return info

@@ -295,14 +334,14 @@ class RutubeTagsIE(RutubePlaylistBaseIE):
    IE_DESC = 'Rutube tags'
    _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/tags/video/1800/',
        'url': 'https://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'
    _PAGE_TEMPLATE = 'https://rutube.ru/api/tags/video/%s/?page=%s&format=json'


class RutubeMovieIE(RutubePlaylistBaseIE):

@@ -310,8 +349,8 @@ class RutubeMovieIE(RutubePlaylistBaseIE):
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'

    _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
    _MOVIE_TEMPLATE = 'https://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'https://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'

    def _real_extract(self, url):
        movie_id = self._match_id(url)

@@ -327,62 +366,82 @@ class RutubePersonIE(RutubePlaylistBaseIE):
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/video/person/313878/',
        'url': 'https://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 37,
        'playlist_mincount': 36,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
    _PAGE_TEMPLATE = 'https://rutube.ru/api/video/person/%s/?page=%s&format=json'


class RutubePlaylistIE(RutubePlaylistBaseIE):
    IE_NAME = 'rutube:playlist'
    IE_DESC = 'Rutube playlists'
    _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/[\da-z]{32}/\?.*?\bpl_id=(?P<id>\d+)'
    _VALID_URL = r'https?://rutube\.ru/plst/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://rutube.ru/video/cecd58ed7d531fc0f3d795d51cee9026/?pl_id=3097&pl_type=tag',
        'url': 'https://rutube.ru/plst/308547/',
        'info_dict': {
            'id': '3097',
            'id': '308547',
        },
        'playlist_count': 27,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_id=4252&pl_type=source',
        'only_matching': True,
        'playlist_mincount': 22,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/playlist/%s/%s/?page=%s&format=json'

    @classmethod
    def suitable(cls, url):
        from ..utils import int_or_none, parse_qs

        if not super().suitable(url):
            return False
        params = parse_qs(url)
        return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0])

    def _next_page_url(self, page_num, playlist_id, item_kind):
        return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num)

    def _real_extract(self, url):
        qs = parse_qs(url)
        playlist_kind = qs['pl_type'][0]
        playlist_id = qs['pl_id'][0]
        return self._extract_playlist(playlist_id, item_kind=playlist_kind)
    _PAGE_TEMPLATE = 'https://rutube.ru/api/playlist/custom/%s/videos?page=%s&format=json'


class RutubeChannelIE(RutubePlaylistBaseIE):
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channel'
    _VALID_URL = r'https?://rutube\.ru/channel/(?P<id>\d+)/videos'
    _VALID_URL = r'https?://rutube\.ru/(?:channel/(?P<id>\d+)|u/(?P<slug>\w+))(?:/(?P<section>videos|shorts|playlists))?'
    _TESTS = [{
        'url': 'https://rutube.ru/channel/639184/videos/',
        'info_dict': {
            'id': '639184',
            'id': '639184_videos',
        },
        'playlist_mincount': 133,
        'playlist_mincount': 129,
    }, {
        'url': 'https://rutube.ru/channel/25902603/shorts/',
        'info_dict': {
            'id': '25902603_shorts',
        },
        'playlist_mincount': 277,
    }, {
        'url': 'https://rutube.ru/channel/25902603/',
        'info_dict': {
            'id': '25902603',
        },
        'playlist_mincount': 406,
    }, {
        'url': 'https://rutube.ru/u/rutube/videos/',
        'info_dict': {
            'id': '23704195_videos',
        },
        'playlist_mincount': 113,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
    _PAGE_TEMPLATE = 'https://rutube.ru/api/video/person/%s/?page=%s&format=json&origin__type=%s'

    def _next_page_url(self, page_num, playlist_id, section):
        origin_type = {
            'videos': 'rtb,rst,ifrm,rspa',
            'shorts': 'rshorts',
            None: '',
        }.get(section)
        return self._PAGE_TEMPLATE % (playlist_id, page_num, origin_type)

    def _real_extract(self, url):
        playlist_id, slug, section = self._match_valid_url(url).group('id', 'slug', 'section')
        if section == 'playlists':
            raise UnsupportedError(url)
        if slug:
            webpage = self._download_webpage(url, slug)
            redux_state = self._search_json(
                r'window\.reduxState\s*=', webpage, 'redux state', slug, transform_source=js_to_json)
            playlist_id = traverse_obj(redux_state, (
                'api', 'queries', lambda k, _: k.startswith('channelIdBySlug'),
                'data', 'channel_id', {int}, {str_or_none}, any))
        playlist = self._extract_playlist(playlist_id, section=section)
        if section:
            playlist['id'] = f'{playlist_id}_{section}'
        return playlist

@@ -1,11 +1,9 @@
import base64

from .common import InfoExtractor
from ..aes import aes_cbc_decrypt, unpad_pkcs7
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..utils import (
    ExtractorError,
    bytes_to_intlist,
    intlist_to_bytes,
    unified_strdate,
)

@@ -68,10 +66,10 @@ class ShemarooMeIE(InfoExtractor):
        data_json = self._download_json('https://www.shemaroome.com/users/user_all_lists', video_id, data=data.encode())
        if not data_json.get('status'):
            raise ExtractorError('Premium videos cannot be downloaded yet.', expected=True)
        url_data = bytes_to_intlist(base64.b64decode(data_json['new_play_url']))
        key = bytes_to_intlist(base64.b64decode(data_json['key']))
        iv = [0] * 16
        m3u8_url = unpad_pkcs7(intlist_to_bytes(aes_cbc_decrypt(url_data, key, iv))).decode('ascii')
        url_data = base64.b64decode(data_json['new_play_url'])
        key = base64.b64decode(data_json['key'])
        iv = bytes(16)
        m3u8_url = unpad_pkcs7(aes_cbc_decrypt_bytes(url_data, key, iv)).decode('ascii')
        headers = {'stream_key': data_json['stream_key']}
        formats, m3u8_subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, fatal=False, headers=headers)
        for fmt in formats:

@@ -199,8 +199,9 @@ class SonyLIVSeriesIE(InfoExtractor):
        },
    }]
    _API_BASE = 'https://apiv2.sonyliv.com/AGL'
    _SORT_ORDERS = ('asc', 'desc')

    def _entries(self, show_id):
    def _entries(self, show_id, sort_order):
        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Referer': 'https://www.sonyliv.com',

@@ -215,6 +216,9 @@ class SonyLIVSeriesIE(InfoExtractor):
                'from': '0',
                'to': '49',
            }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))

        if sort_order == 'desc':
            seasons = reversed(seasons)
        for season in seasons:
            season_id = str(season['id'])
            note = traverse_obj(season, ('metadata', 'title', {str})) or 'season'

@@ -226,7 +230,7 @@ class SonyLIVSeriesIE(InfoExtractor):
                        'from': str(cursor),
                        'to': str(cursor + 99),
                        'orderBy': 'episodeNumber',
                        'sortOrder': 'asc',
                        'sortOrder': sort_order,
                    }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
                if not episodes:
                    break

@@ -237,4 +241,10 @@ class SonyLIVSeriesIE(InfoExtractor):

    def _real_extract(self, url):
        show_id = self._match_id(url)
        return self.playlist_result(self._entries(show_id), playlist_id=show_id)

        sort_order = self._configuration_arg('sort_order', [self._SORT_ORDERS[0]])[0]
        if sort_order not in self._SORT_ORDERS:
            raise ValueError(
                f'Invalid sort order "{sort_order}". Allowed values are: {", ".join(self._SORT_ORDERS)}')

        return self.playlist_result(self._entries(show_id, sort_order), playlist_id=show_id)

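A minimal sketch, not part of this commit, of passing the new sort_order option through the Python API. The extractor-args key 'sonylivseries' and the show URL below are assumptions for illustration only; 'desc' reverses both the season order and the per-season episode order, as implemented above.

    import yt_dlp

    opts = {
        # hypothetical key/URL; the option value must be one of _SORT_ORDERS
        'extractor_args': {'sonylivseries': {'sort_order': ['desc']}},
        'skip_download': True,
    }
    with yt_dlp.YoutubeDL(opts) as ydl:
        ydl.extract_info('https://www.sonyliv.com/shows/example-show', download=False)
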
@@ -7,7 +7,6 @@ from .common import InfoExtractor, SearchInfoExtractor
|
||||
from ..networking import HEADRequest
|
||||
from ..networking.exceptions import HTTPError
|
||||
from ..utils import (
|
||||
KNOWN_EXTENSIONS,
|
||||
ExtractorError,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
@@ -241,7 +240,7 @@ class SoundcloudBaseIE(InfoExtractor):
|
||||
format_urls.add(format_url)
|
||||
formats.append({
|
||||
'format_id': 'download',
|
||||
'ext': urlhandle_detect_ext(urlh) or 'mp3',
|
||||
'ext': urlhandle_detect_ext(urlh, default='mp3'),
|
||||
'filesize': int_or_none(urlh.headers.get('Content-Length')),
|
||||
'url': format_url,
|
||||
'quality': 10,
|
||||
@@ -251,50 +250,15 @@ class SoundcloudBaseIE(InfoExtractor):
|
||||
def invalid_url(url):
|
||||
return not url or url in format_urls
|
||||
|
||||
def add_format(f, protocol, is_preview=False):
|
||||
mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url)
|
||||
if mobj:
|
||||
for k, v in mobj.groupdict().items():
|
||||
if not f.get(k):
|
||||
f[k] = v
|
||||
format_id_list = []
|
||||
if protocol:
|
||||
format_id_list.append(protocol)
|
||||
ext = f.get('ext')
|
||||
if ext == 'aac':
|
||||
f.update({
|
||||
'abr': 256,
|
||||
'quality': 5,
|
||||
'format_note': 'Premium',
|
||||
})
|
||||
for k in ('ext', 'abr'):
|
||||
v = str_or_none(f.get(k))
|
||||
if v:
|
||||
format_id_list.append(v)
|
||||
preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url'])
|
||||
if preview:
|
||||
format_id_list.append('preview')
|
||||
abr = f.get('abr')
|
||||
if abr:
|
||||
f['abr'] = int(abr)
|
||||
if protocol in ('hls', 'hls-aes'):
|
||||
protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
|
||||
else:
|
||||
protocol = 'http'
|
||||
f.update({
|
||||
'format_id': '_'.join(format_id_list),
|
||||
'protocol': protocol,
|
||||
'preference': -10 if preview else None,
|
||||
})
|
||||
formats.append(f)
|
||||
|
||||
# New API
|
||||
for t in traverse_obj(info, ('media', 'transcodings', lambda _, v: url_or_none(v['url']))):
|
||||
for t in traverse_obj(info, ('media', 'transcodings', lambda _, v: url_or_none(v['url']) and v['preset'])):
|
||||
if extract_flat:
|
||||
break
|
||||
format_url = t['url']
|
||||
preset = t['preset']
|
||||
preset_base = preset.partition('_')[0]
|
||||
|
||||
protocol = traverse_obj(t, ('format', 'protocol', {str}))
|
||||
protocol = traverse_obj(t, ('format', 'protocol', {str})) or 'http'
|
||||
if protocol == 'progressive':
|
||||
protocol = 'http'
|
||||
if protocol != 'hls' and '/hls' in format_url:
|
||||
@@ -302,32 +266,54 @@ class SoundcloudBaseIE(InfoExtractor):
|
||||
if protocol == 'encrypted-hls' or '/encrypted-hls' in format_url:
|
||||
protocol = 'hls-aes'
|
||||
|
||||
ext = None
|
||||
if preset := traverse_obj(t, ('preset', {str_or_none})):
|
||||
ext = preset.split('_')[0]
|
||||
if ext not in KNOWN_EXTENSIONS:
|
||||
ext = mimetype2ext(traverse_obj(t, ('format', 'mime_type', {str})))
|
||||
|
||||
identifier = join_nonempty(protocol, ext, delim='_')
|
||||
if not self._is_requested(identifier):
|
||||
self.write_debug(f'"{identifier}" is not a requested format, skipping')
|
||||
short_identifier = f'{protocol}_{preset_base}'
|
||||
if preset_base == 'abr':
|
||||
self.write_debug(f'Skipping broken "{short_identifier}" format')
|
||||
continue
|
||||
if not self._is_requested(short_identifier):
|
||||
self.write_debug(f'"{short_identifier}" is not a requested format, skipping')
|
||||
continue
|
||||
|
||||
# XXX: if not extract_flat, 429 error must be caught where _extract_info_dict is called
|
||||
stream_url = traverse_obj(self._call_api(
|
||||
format_url, track_id, f'Downloading {identifier} format info JSON',
|
||||
format_url, track_id, f'Downloading {short_identifier} format info JSON',
|
||||
query=query, headers=self._HEADERS), ('url', {url_or_none}))
|
||||
|
||||
if invalid_url(stream_url):
|
||||
continue
|
||||
format_urls.add(stream_url)
|
||||
add_format({
|
||||
|
||||
mime_type = traverse_obj(t, ('format', 'mime_type', {str}))
|
||||
codec = self._search_regex(r'codecs="([^"]+)"', mime_type, 'codec', default=None)
|
||||
ext = {
|
||||
'mp4a': 'm4a',
|
||||
'opus': 'opus',
|
||||
}.get(codec[:4] if codec else None) or mimetype2ext(mime_type, default=None)
|
||||
if not ext or ext == 'm3u8':
|
||||
ext = preset_base
|
||||
|
||||
is_premium = t.get('quality') == 'hq'
|
||||
abr = int_or_none(
|
||||
self._search_regex(r'(\d+)k$', preset, 'abr', default=None)
|
||||
or self._search_regex(r'\.(\d+)\.(?:opus|mp3)[/?]', stream_url, 'abr', default=None)
|
||||
or (256 if (is_premium and 'aac' in preset) else None))
|
||||
|
||||
is_preview = (t.get('snipped')
|
||||
or '/preview/' in format_url
|
||||
or re.search(r'/(?:preview|playlist)/0/30/', stream_url))
|
||||
|
||||
formats.append({
|
||||
'format_id': join_nonempty(protocol, preset, is_preview and 'preview', delim='_'),
|
||||
'url': stream_url,
|
||||
'ext': ext,
|
||||
}, protocol, t.get('snipped') or '/preview/' in format_url)
|
||||
|
||||
for f in formats:
|
||||
f['vcodec'] = 'none'
|
||||
'acodec': codec,
|
||||
'vcodec': 'none',
|
||||
'abr': abr,
|
||||
'protocol': 'm3u8_native' if protocol in ('hls', 'hls-aes') else 'http',
|
||||
'container': 'm4a_dash' if ext == 'm4a' else None,
|
||||
'quality': 5 if is_premium else 0 if (abr and abr >= 160) else -1,
|
||||
'format_note': 'Premium' if is_premium else None,
|
||||
'preference': -10 if is_preview else None,
|
||||
})
|
||||
|
||||
if not formats and info.get('policy') == 'BLOCK':
|
||||
self.raise_geo_restricted(metadata_available=True)
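# Illustrative sketch (not part of the diff): how a bitrate can be recovered from a
# SoundCloud-style preset name such as 'aac_160k' or from a stream URL, mirroring the
# two regexes used above. The sample values below are hypothetical.
import re

def guess_abr(preset, stream_url, is_premium=False):
    # e.g. 'opus_0_0' and 'mp3_standard' carry no bitrate suffix; 'aac_160k' -> 160
    m = re.search(r'(\d+)k$', preset)
    if m:
        return int(m.group(1))
    # some stream URLs embed the bitrate, e.g. '.../track.128.mp3?...' -> 128
    m = re.search(r'\.(\d+)\.(?:opus|mp3)[/?]', stream_url)
    if m:
        return int(m.group(1))
    # premium AAC streams are assumed to be 256 kbps when nothing else is available
    return 256 if (is_premium and 'aac' in preset) else None

assert guess_abr('aac_160k', '') == 160
assert guess_abr('mp3_standard', 'https://cf-media.sndcdn.com/track.128.mp3?Policy=x') == 128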
|
||||
|
||||
@@ -71,9 +71,11 @@ class SpankBangIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
mobj = self._match_valid_url(url)
|
||||
video_id = mobj.group('id') or mobj.group('id_2')
|
||||
country = self.get_param('geo_bypass_country') or 'US'
|
||||
self._set_cookie('.spankbang.com', 'country', country.upper())
|
||||
webpage = self._download_webpage(
|
||||
url.replace(f'/{video_id}/embed', f'/{video_id}/video'),
|
||||
video_id, headers={'Cookie': 'country=US'})
|
||||
video_id, impersonate=True)
|
||||
|
||||
if re.search(r'<[^>]+\b(?:id|class)=["\']video_removed', webpage):
|
||||
raise ExtractorError(
|
||||
|
||||
@@ -2,13 +2,16 @@ import itertools
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
filter_dict,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
parse_qs,
|
||||
str_or_none,
|
||||
try_get,
|
||||
unified_timestamp,
|
||||
url_or_none,
|
||||
)
|
||||
from ..utils.traversal import traverse_obj
|
||||
|
||||
|
||||
def _extract_episode(data, episode_id=None):
|
||||
@@ -58,15 +61,10 @@ def _extract_episode(data, episode_id=None):
|
||||
|
||||
|
||||
class SpreakerIE(InfoExtractor):
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
api\.spreaker\.com/
|
||||
(?:
|
||||
(?:download/)?episode|
|
||||
v2/episodes
|
||||
)/
|
||||
(?P<id>\d+)
|
||||
'''
|
||||
_VALID_URL = [
|
||||
r'https?://api\.spreaker\.com/(?:(?:download/)?episode|v2/episodes)/(?P<id>\d+)',
|
||||
r'https?://(?:www\.)?spreaker\.com/episode/[^#?/]*?(?P<id>\d+)/?(?:[?#]|$)',
|
||||
]
|
||||
_TESTS = [{
|
||||
'url': 'https://api.spreaker.com/episode/12534508',
|
||||
'info_dict': {
|
||||
@@ -83,7 +81,9 @@ class SpreakerIE(InfoExtractor):
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'comment_count': int,
|
||||
'series': 'Success With Music (SWM)',
|
||||
'series': 'Success With Music | SWM',
|
||||
'thumbnail': 'https://d3wo5wojvuv7l.cloudfront.net/t_square_limited_160/images.spreaker.com/original/777ce4f96b71b0e1b7c09a5e625210e3.jpg',
|
||||
'creators': ['SWM'],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://api.spreaker.com/download/episode/12534508/swm_ep15_how_to_market_your_music_part_2.mp3',
|
||||
@@ -91,52 +91,75 @@ class SpreakerIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://api.spreaker.com/v2/episodes/12534508?export=episode_segments',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'note': 'episode',
|
||||
'url': 'https://www.spreaker.com/episode/grunge-music-origins-the-raw-sound-that-defined-a-generation--60269615',
|
||||
'info_dict': {
|
||||
'id': '60269615',
|
||||
'display_id': 'grunge-music-origins-the-raw-sound-that-',
|
||||
'ext': 'mp3',
|
||||
'title': 'Grunge Music Origins - The Raw Sound that Defined a Generation',
|
||||
'description': str,
|
||||
'timestamp': 1717468905,
|
||||
'upload_date': '20240604',
|
||||
'uploader': 'Katie Brown 2',
|
||||
'uploader_id': '17733249',
|
||||
'duration': 818.83,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'comment_count': int,
|
||||
'series': '90s Grunge',
|
||||
'thumbnail': 'https://d3wo5wojvuv7l.cloudfront.net/t_square_limited_160/images.spreaker.com/original/bb0d4178f7cf57cc8786dedbd9c5d969.jpg',
|
||||
'creators': ['Katie Brown 2'],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.spreaker.com/episode/60269615',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
episode_id = self._match_id(url)
|
||||
data = self._download_json(
|
||||
f'https://api.spreaker.com/v2/episodes/{episode_id}',
|
||||
episode_id)['response']['episode']
|
||||
f'https://api.spreaker.com/v2/episodes/{episode_id}', episode_id,
|
||||
query=traverse_obj(parse_qs(url), {'key': ('key', 0)}))['response']['episode']
|
||||
return _extract_episode(data, episode_id)
|
||||
|
||||
|
||||
class SpreakerPageIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?spreaker\.com/user/[^/]+/(?P<id>[^/?#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.spreaker.com/user/9780658/swm-ep15-how-to-market-your-music-part-2',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
episode_id = self._search_regex(
|
||||
(r'data-episode_id=["\'](?P<id>\d+)',
|
||||
r'episode_id\s*:\s*(?P<id>\d+)'), webpage, 'episode id')
|
||||
return self.url_result(
|
||||
f'https://api.spreaker.com/episode/{episode_id}',
|
||||
ie=SpreakerIE.ie_key(), video_id=episode_id)
|
||||
|
||||
|
||||
class SpreakerShowIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://api\.spreaker\.com/show/(?P<id>\d+)'
|
||||
_VALID_URL = [
|
||||
r'https?://api\.spreaker\.com/show/(?P<id>\d+)',
|
||||
r'https?://(?:www\.)?spreaker\.com/podcast/[\w-]+--(?P<id>[\d]+)',
|
||||
r'https?://(?:www\.)?spreaker\.com/show/(?P<id>\d+)/episodes/feed',
|
||||
]
|
||||
_TESTS = [{
|
||||
'url': 'https://api.spreaker.com/show/4652058',
|
||||
'info_dict': {
|
||||
'id': '4652058',
|
||||
},
|
||||
'playlist_mincount': 118,
|
||||
}, {
|
||||
'url': 'https://www.spreaker.com/podcast/health-wealth--5918323',
|
||||
'info_dict': {
|
||||
'id': '5918323',
|
||||
},
|
||||
'playlist_mincount': 60,
|
||||
}, {
|
||||
'url': 'https://www.spreaker.com/show/5887186/episodes/feed',
|
||||
'info_dict': {
|
||||
'id': '5887186',
|
||||
},
|
||||
'playlist_mincount': 290,
|
||||
}]
|
||||
|
||||
def _entries(self, show_id):
|
||||
def _entries(self, show_id, key=None):
|
||||
for page_num in itertools.count(1):
|
||||
episodes = self._download_json(
|
||||
f'https://api.spreaker.com/show/{show_id}/episodes',
|
||||
show_id, note=f'Downloading JSON page {page_num}', query={
|
||||
show_id, note=f'Downloading JSON page {page_num}', query=filter_dict({
|
||||
'page': page_num,
|
||||
'max_per_page': 100,
|
||||
})
|
||||
'key': key,
|
||||
}))
|
||||
pager = try_get(episodes, lambda x: x['response']['pager'], dict)
|
||||
if not pager:
|
||||
break
|
||||
@@ -152,21 +175,5 @@ class SpreakerShowIE(InfoExtractor):
|
||||
|
||||
def _real_extract(self, url):
|
||||
show_id = self._match_id(url)
|
||||
return self.playlist_result(self._entries(show_id), playlist_id=show_id)
|
||||
|
||||
|
||||
class SpreakerShowPageIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?spreaker\.com/show/(?P<id>[^/?#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.spreaker.com/show/success-with-music',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
show_id = self._search_regex(
|
||||
r'show_id\s*:\s*(?P<id>\d+)', webpage, 'show id')
|
||||
return self.url_result(
|
||||
f'https://api.spreaker.com/show/{show_id}',
|
||||
ie=SpreakerShowIE.ie_key(), video_id=show_id)
|
||||
key = traverse_obj(parse_qs(url), ('key', 0))
|
||||
return self.playlist_result(self._entries(show_id, key), playlist_id=show_id)
|
||||
|
||||
@@ -28,24 +28,21 @@ class StripchatIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id, headers=self.geo_verification_headers())
|
||||
data = self._search_json(
|
||||
r'<script\b[^>]*>\s*window\.__PRELOADED_STATE__\s*=',
|
||||
webpage, 'data', video_id, transform_source=lowercase_escape)
|
||||
|
||||
data = self._parse_json(
|
||||
self._search_regex(
|
||||
r'<script\b[^>]*>\s*window\.__PRELOADED_STATE__\s*=(?P<value>.*?)<\/script>',
|
||||
webpage, 'data', default='{}', group='value'),
|
||||
video_id, transform_source=lowercase_escape, fatal=False)
|
||||
if not data:
|
||||
raise ExtractorError('Unable to find configuration for stream.')
|
||||
|
||||
if traverse_obj(data, ('viewCam', 'show'), expected_type=dict):
|
||||
raise ExtractorError('Model is in private show', expected=True)
|
||||
elif not traverse_obj(data, ('viewCam', 'model', 'isLive'), expected_type=bool):
|
||||
if traverse_obj(data, ('viewCam', 'show', {dict})):
|
||||
raise ExtractorError('Model is in a private show', expected=True)
|
||||
if not traverse_obj(data, ('viewCam', 'model', 'isLive', {bool})):
|
||||
raise UserNotLive(video_id=video_id)
|
||||
|
||||
model_id = traverse_obj(data, ('viewCam', 'model', 'id'), expected_type=int)
|
||||
model_id = data['viewCam']['model']['id']
|
||||
|
||||
formats = []
|
||||
for host in traverse_obj(data, ('config', 'data', (
|
||||
# HLS hosts are currently found in .configV3.static.features.hlsFallback.fallbackDomains[]
|
||||
# The rest of the path is for backwards compatibility and to guard against A/B testing
|
||||
for host in traverse_obj(data, ((('config', 'data'), ('configV3', 'static')), (
|
||||
(('features', 'featuresV2'), 'hlsFallback', 'fallbackDomains', ...), 'hlsStreamHost'))):
|
||||
formats = self._extract_m3u8_formats(
|
||||
f'https://edge-hls.{host}/hls/{model_id}/master/{model_id}_auto.m3u8',
|
||||
@@ -53,7 +50,7 @@ class StripchatIE(InfoExtractor):
|
||||
if formats:
|
||||
break
|
||||
if not formats:
|
||||
self.raise_no_formats('No active streams found', expected=True)
|
||||
self.raise_no_formats('Unable to extract stream host', video_id=video_id)
|
||||
|
||||
return {
|
||||
'id': video_id,
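# Illustrative sketch (not part of the diff): the branching traverse_obj call above checks
# both the new location (.configV3.static.features.hlsFallback.fallbackDomains[]) and the
# legacy .config.data path. A plain-Python approximation of that fallback order, run over
# a made-up preloaded-state dict, would look roughly like this:
def candidate_hls_hosts(data):
    hosts = []
    for root in (data.get('config', {}).get('data', {}),
                 data.get('configV3', {}).get('static', {})):
        for features_key in ('features', 'featuresV2'):
            hosts.extend(root.get(features_key, {})
                             .get('hlsFallback', {})
                             .get('fallbackDomains', []))
        if root.get('hlsStreamHost'):
            hosts.append(root['hlsStreamHost'])
    return hosts

sample = {'configV3': {'static': {'features': {'hlsFallback': {'fallbackDomains': ['doppiocdn.com']}}}}}
assert candidate_hls_hosts(sample) == ['doppiocdn.com']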
|
||||
|
||||
@@ -413,15 +413,6 @@ class TikTokBaseIE(InfoExtractor):
|
||||
for f in formats:
|
||||
self._set_cookie(urllib.parse.urlparse(f['url']).hostname, 'sid_tt', auth_cookie.value)
|
||||
|
||||
thumbnails = []
|
||||
for cover_id in ('cover', 'ai_dynamic_cover', 'animated_cover', 'ai_dynamic_cover_bak',
|
||||
'origin_cover', 'dynamic_cover'):
|
||||
for cover_url in traverse_obj(video_info, (cover_id, 'url_list', ...)):
|
||||
thumbnails.append({
|
||||
'id': cover_id,
|
||||
'url': cover_url,
|
||||
})
|
||||
|
||||
stats_info = aweme_detail.get('statistics') or {}
|
||||
music_info = aweme_detail.get('music') or {}
|
||||
labels = traverse_obj(aweme_detail, ('hybrid_label', ..., 'text'), expected_type=str)
|
||||
@@ -467,7 +458,17 @@ class TikTokBaseIE(InfoExtractor):
|
||||
'formats': formats,
|
||||
'subtitles': self.extract_subtitles(
|
||||
aweme_detail, aweme_id, traverse_obj(author_info, 'uploader', 'uploader_id', 'channel_id')),
|
||||
'thumbnails': thumbnails,
|
||||
'thumbnails': [
|
||||
{
|
||||
'id': cover_id,
|
||||
'url': cover_url,
|
||||
'preference': -1 if cover_id in ('cover', 'origin_cover') else -2,
|
||||
}
|
||||
for cover_id in (
|
||||
'cover', 'ai_dynamic_cover', 'animated_cover',
|
||||
'ai_dynamic_cover_bak', 'origin_cover', 'dynamic_cover')
|
||||
for cover_url in traverse_obj(video_info, (cover_id, 'url_list', ...))
|
||||
],
|
||||
'duration': (traverse_obj(video_info, (
|
||||
(None, 'download_addr'), 'duration', {int_or_none(scale=1000)}, any))
|
||||
or traverse_obj(music_info, ('duration', {int_or_none}))),
|
||||
@@ -600,11 +601,15 @@ class TikTokBaseIE(InfoExtractor):
|
||||
'repost_count': 'shareCount',
|
||||
'comment_count': 'commentCount',
|
||||
}), expected_type=int_or_none),
|
||||
'thumbnails': traverse_obj(aweme_detail, (
|
||||
(None, 'video'), ('thumbnail', 'cover', 'dynamicCover', 'originCover'), {
|
||||
'url': ({url_or_none}, {self._proto_relative_url}),
|
||||
},
|
||||
)),
|
||||
'thumbnails': [
|
||||
{
|
||||
'id': cover_id,
|
||||
'url': self._proto_relative_url(cover_url),
|
||||
'preference': -2 if cover_id == 'dynamicCover' else -1,
|
||||
}
|
||||
for cover_id in ('thumbnail', 'cover', 'dynamicCover', 'originCover')
|
||||
for cover_url in traverse_obj(aweme_detail, ((None, 'video'), cover_id, {url_or_none}))
|
||||
],
|
||||
}
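# Illustrative sketch (not part of the diff): both thumbnail blocks above flatten the
# various "cover" fields into a list of {'id', 'url', 'preference'} dicts, ranking static
# covers above animated/dynamic ones. A standalone version over a made-up payload:
def collect_thumbnails(video_info, cover_ids=('cover', 'origin_cover', 'dynamic_cover')):
    thumbnails = []
    for cover_id in cover_ids:
        for cover_url in (video_info.get(cover_id) or {}).get('url_list') or []:
            thumbnails.append({
                'id': cover_id,
                'url': cover_url,
                # static covers get the better (higher) preference
                'preference': -1 if cover_id in ('cover', 'origin_cover') else -2,
            })
    return thumbnails

demo = {'cover': {'url_list': ['https://example.com/cover.jpg']},
        'dynamic_cover': {'url_list': ['https://example.com/dyn.webp']}}
assert [t['preference'] for t in collect_thumbnails(demo)] == [-1, -2]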
|
||||
|
||||
yt_dlp/extractor/uliza.py (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
make_archive_id,
|
||||
parse_qs,
|
||||
time_seconds,
|
||||
)
|
||||
from ..utils.traversal import traverse_obj
|
||||
|
||||
|
||||
class UlizaPlayerIE(InfoExtractor):
|
||||
_VALID_URL = r'https://player-api\.p\.uliza\.jp/v1/players/[^?#]+\?(?:[^#]*&)?name=(?P<id>[^#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://player-api.p.uliza.jp/v1/players/timeshift-disabled/pia/admin?type=normal&playerobjectname=ulizaPlayer&name=livestream01_dvr&repeatable=true',
|
||||
'info_dict': {
|
||||
'id': '88f3109a-f503-4d0f-a9f7-9f39ac745d84',
|
||||
'ext': 'mp4',
|
||||
'title': '88f3109a-f503-4d0f-a9f7-9f39ac745d84',
|
||||
'live_status': 'was_live',
|
||||
'_old_archive_ids': ['piaulizaportal 88f3109a-f503-4d0f-a9f7-9f39ac745d84'],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://player-api.p.uliza.jp/v1/players/uliza_jp_gallery_normal/promotion/admin?type=presentation&name=cookings&targetid=player1',
|
||||
'info_dict': {
|
||||
'id': 'ae350126-5e22-4a7f-a8ac-8d0fd448b800',
|
||||
'ext': 'mp4',
|
||||
'title': 'ae350126-5e22-4a7f-a8ac-8d0fd448b800',
|
||||
'live_status': 'not_live',
|
||||
'_old_archive_ids': ['piaulizaportal ae350126-5e22-4a7f-a8ac-8d0fd448b800'],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://player-api.p.uliza.jp/v1/players/default-player/pia/admin?type=normal&name=pia_movie_uliza_fix&targetid=ulizahtml5&repeatable=true',
|
||||
'info_dict': {
|
||||
'id': '0644ecc8-e354-41b4-b957-3b08a2d63df1',
|
||||
'ext': 'mp4',
|
||||
'title': '0644ecc8-e354-41b4-b957-3b08a2d63df1',
|
||||
'live_status': 'not_live',
|
||||
'_old_archive_ids': ['piaulizaportal 0644ecc8-e354-41b4-b957-3b08a2d63df1'],
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
player_data = self._download_webpage(
|
||||
url, display_id, headers={'Referer': 'https://player-api.p.uliza.jp/'},
|
||||
note='Fetching player data', errnote='Unable to fetch player data')
|
||||
|
||||
m3u8_url = self._search_regex(
|
||||
r'["\'](https://vms-api\.p\.uliza\.jp/v1/prog-index\.m3u8[^"\']+)', player_data, 'm3u8 url')
|
||||
video_id = parse_qs(m3u8_url).get('ss', [display_id])[0]
|
||||
|
||||
formats = self._extract_m3u8_formats(m3u8_url, video_id)
|
||||
m3u8_type = self._search_regex(
|
||||
r'/hls/(dvr|video)/', traverse_obj(formats, (0, 'url')), 'm3u8 type', default=None)
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_id,
|
||||
'formats': formats,
|
||||
'live_status': {
|
||||
'video': 'is_live',
|
||||
'dvr': 'was_live', # short-term archives
|
||||
}.get(m3u8_type, 'not_live'), # VOD or long-term archives
|
||||
'_old_archive_ids': [make_archive_id('PIAULIZAPortal', video_id)],
|
||||
}
|
||||
|
||||
|
||||
class UlizaPortalIE(InfoExtractor):
|
||||
IE_DESC = 'ulizaportal.jp'
|
||||
_VALID_URL = r'https?://(?:www\.)?ulizaportal\.jp/pages/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
|
||||
_TESTS = [{
|
||||
'url': 'https://ulizaportal.jp/pages/005f18b7-e810-5618-cb82-0987c5755d44',
|
||||
'info_dict': {
|
||||
'id': 'ae350126-5e22-4a7f-a8ac-8d0fd448b800',
|
||||
'display_id': '005f18b7-e810-5618-cb82-0987c5755d44',
|
||||
'title': 'プレゼンテーションプレイヤーのサンプル',
|
||||
'live_status': 'not_live',
|
||||
'_old_archive_ids': ['piaulizaportal ae350126-5e22-4a7f-a8ac-8d0fd448b800'],
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
'ignore_no_formats_error': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://ulizaportal.jp/pages/005e1b23-fe93-5780-19a0-98e917cc4b7d?expires=4102412400&signature=f422a993b683e1068f946caf406d211c17d1ef17da8bef3df4a519502155aa91&version=1',
|
||||
'info_dict': {
|
||||
'id': '0644ecc8-e354-41b4-b957-3b08a2d63df1',
|
||||
'display_id': '005e1b23-fe93-5780-19a0-98e917cc4b7d',
|
||||
'title': '【確認用】視聴サンプルページ(ULIZA)',
|
||||
'live_status': 'not_live',
|
||||
'_old_archive_ids': ['piaulizaportal 0644ecc8-e354-41b4-b957-3b08a2d63df1'],
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
'ignore_no_formats_error': True,
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
expires = int_or_none(traverse_obj(parse_qs(url), ('expires', 0)))
|
||||
if expires and expires <= time_seconds():
|
||||
raise ExtractorError('The link is expired', video_id=video_id, expected=True)
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
player_data_url = self._search_regex(
|
||||
r'<script [^>]*\bsrc="(https://player-api\.p\.uliza\.jp/v1/players/[^"]+)"',
|
||||
webpage, 'player data url')
|
||||
return self.url_result(
|
||||
player_data_url, UlizaPlayerIE, url_transparent=True,
|
||||
display_id=video_id, video_title=self._html_extract_title(webpage))
|
||||
@@ -1,189 +0,0 @@
|
||||
import functools
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
OnDemandPagedList,
|
||||
int_or_none,
|
||||
parse_duration,
|
||||
qualities,
|
||||
remove_start,
|
||||
strip_or_none,
|
||||
)
|
||||
|
||||
|
||||
class VeohIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?veoh\.com/(?:watch|videos|embed|iphone/#_Watch)/(?P<id>(?:v|e|yapi-)[\da-zA-Z]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.veoh.com/watch/v56314296nk7Zdmz3',
|
||||
'md5': '620e68e6a3cff80086df3348426c9ca3',
|
||||
'info_dict': {
|
||||
'id': 'v56314296nk7Zdmz3',
|
||||
'ext': 'mp4',
|
||||
'title': 'Straight Backs Are Stronger',
|
||||
'description': 'md5:203f976279939a6dc664d4001e13f5f4',
|
||||
'thumbnail': 're:https://fcache\\.veoh\\.com/file/f/th56314296\\.jpg(\\?.*)?',
|
||||
'uploader': 'LUMOback',
|
||||
'duration': 46,
|
||||
'view_count': int,
|
||||
'average_rating': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 0,
|
||||
'categories': ['technology_and_gaming'],
|
||||
'tags': ['posture', 'posture', 'sensor', 'back', 'pain', 'wearable', 'tech', 'lumo'],
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.veoh.com/embed/v56314296nk7Zdmz3',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.veoh.com/watch/v27701988pbTc4wzN?h1=Chile+workers+cover+up+to+avoid+skin+damage',
|
||||
'md5': '4a6ff84b87d536a6a71e6aa6c0ad07fa',
|
||||
'info_dict': {
|
||||
'id': '27701988',
|
||||
'ext': 'mp4',
|
||||
'title': 'Chile workers cover up to avoid skin damage',
|
||||
'description': 'md5:2bd151625a60a32822873efc246ba20d',
|
||||
'uploader': 'afp-news',
|
||||
'duration': 123,
|
||||
},
|
||||
'skip': 'This video has been deleted.',
|
||||
}, {
|
||||
'url': 'http://www.veoh.com/watch/v69525809F6Nc4frX',
|
||||
'md5': '4fde7b9e33577bab2f2f8f260e30e979',
|
||||
'note': 'Embedded ooyala video',
|
||||
'info_dict': {
|
||||
'id': '69525809',
|
||||
'ext': 'mp4',
|
||||
'title': 'Doctors Alter Plan For Preteen\'s Weight Loss Surgery',
|
||||
'description': 'md5:f5a11c51f8fb51d2315bca0937526891',
|
||||
'uploader': 'newsy-videos',
|
||||
},
|
||||
'skip': 'This video has been deleted.',
|
||||
}, {
|
||||
'url': 'http://www.veoh.com/watch/e152215AJxZktGS',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.veoh.com/videos/v16374379WA437rMH',
|
||||
'md5': 'cceb73f3909063d64f4b93d4defca1b3',
|
||||
'info_dict': {
|
||||
'id': 'v16374379WA437rMH',
|
||||
'ext': 'mp4',
|
||||
'title': 'Phantasmagoria 2, pt. 1-3',
|
||||
'description': 'Phantasmagoria: a Puzzle of Flesh',
|
||||
'thumbnail': 're:https://fcache\\.veoh\\.com/file/f/th16374379\\.jpg(\\?.*)?',
|
||||
'uploader': 'davidspackage',
|
||||
'duration': 968,
|
||||
'view_count': int,
|
||||
'average_rating': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 18,
|
||||
'categories': ['technology_and_gaming', 'gaming'],
|
||||
'tags': ['puzzle', 'of', 'flesh'],
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
metadata = self._download_json(
|
||||
'https://www.veoh.com/watch/getVideo/' + video_id,
|
||||
video_id)
|
||||
video = metadata['video']
|
||||
title = video['title']
|
||||
|
||||
thumbnail_url = None
|
||||
q = qualities(['Regular', 'HQ'])
|
||||
formats = []
|
||||
for f_id, f_url in video.get('src', {}).items():
|
||||
if not f_url:
|
||||
continue
|
||||
if f_id == 'poster':
|
||||
thumbnail_url = f_url
|
||||
else:
|
||||
formats.append({
|
||||
'format_id': f_id,
|
||||
'quality': q(f_id),
|
||||
'url': f_url,
|
||||
})
|
||||
|
||||
categories = metadata.get('categoryPath')
|
||||
if not categories:
|
||||
category = remove_start(strip_or_none(video.get('category')), 'category_')
|
||||
categories = [category] if category else None
|
||||
tags = video.get('tags')
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': video.get('description'),
|
||||
'thumbnail': thumbnail_url,
|
||||
'uploader': video.get('author', {}).get('nickname'),
|
||||
'duration': int_or_none(video.get('lengthBySec')) or parse_duration(video.get('length')),
|
||||
'view_count': int_or_none(video.get('views')),
|
||||
'formats': formats,
|
||||
'average_rating': int_or_none(video.get('rating')),
|
||||
'comment_count': int_or_none(video.get('numOfComments')),
|
||||
'age_limit': 18 if video.get('contentRatingId') == 2 else 0,
|
||||
'categories': categories,
|
||||
'tags': tags.split(', ') if tags else None,
|
||||
}
|
||||
|
||||
|
||||
class VeohUserIE(VeohIE): # XXX: Do not subclass from concrete IE
|
||||
_VALID_URL = r'https?://(?:www\.)?veoh\.com/users/(?P<id>[\w-]+)'
|
||||
IE_NAME = 'veoh:user'
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'https://www.veoh.com/users/valentinazoe',
|
||||
'info_dict': {
|
||||
'id': 'valentinazoe',
|
||||
'title': 'valentinazoe (Uploads)',
|
||||
},
|
||||
'playlist_mincount': 75,
|
||||
},
|
||||
{
|
||||
'url': 'https://www.veoh.com/users/PiensaLibre',
|
||||
'info_dict': {
|
||||
'id': 'PiensaLibre',
|
||||
'title': 'PiensaLibre (Uploads)',
|
||||
},
|
||||
'playlist_mincount': 2,
|
||||
}]
|
||||
|
||||
_PAGE_SIZE = 16
|
||||
|
||||
def _fetch_page(self, uploader, page):
|
||||
response = self._download_json(
|
||||
'https://www.veoh.com/users/published/videos', uploader,
|
||||
note=f'Downloading videos page {page + 1}',
|
||||
headers={
|
||||
'x-csrf-token': self._TOKEN,
|
||||
'content-type': 'application/json;charset=UTF-8',
|
||||
},
|
||||
data=json.dumps({
|
||||
'username': uploader,
|
||||
'maxResults': self._PAGE_SIZE,
|
||||
'page': page + 1,
|
||||
'requestName': 'userPage',
|
||||
}).encode())
|
||||
if not response.get('success'):
|
||||
raise ExtractorError(response['message'])
|
||||
|
||||
for video in response['videos']:
|
||||
yield self.url_result(f'https://www.veoh.com/watch/{video["permalinkId"]}', VeohIE,
|
||||
video['permalinkId'], video.get('title'))
|
||||
|
||||
def _real_initialize(self):
|
||||
webpage = self._download_webpage(
|
||||
'https://www.veoh.com', None, note='Downloading authorization token')
|
||||
self._TOKEN = self._search_regex(
|
||||
r'csrfToken:\s*(["\'])(?P<token>[0-9a-zA-Z]{40})\1', webpage,
|
||||
'request token', group='token')
|
||||
|
||||
def _real_extract(self, url):
|
||||
uploader = self._match_id(url)
|
||||
return self.playlist_result(OnDemandPagedList(
|
||||
functools.partial(self._fetch_page, uploader),
|
||||
self._PAGE_SIZE), uploader, f'{uploader} (Uploads)')
|
||||
@@ -17,10 +17,10 @@ from ..utils import (
|
||||
get_element_html_by_id,
|
||||
int_or_none,
|
||||
join_nonempty,
|
||||
parse_qs,
|
||||
parse_resolution,
|
||||
str_or_none,
|
||||
str_to_int,
|
||||
traverse_obj,
|
||||
try_call,
|
||||
unescapeHTML,
|
||||
unified_timestamp,
|
||||
@@ -29,6 +29,7 @@ from ..utils import (
|
||||
urlencode_postdata,
|
||||
urljoin,
|
||||
)
|
||||
from ..utils.traversal import require, traverse_obj
|
||||
|
||||
|
||||
class VKBaseIE(InfoExtractor):
|
||||
@@ -91,17 +92,17 @@ class VKBaseIE(InfoExtractor):
|
||||
class VKIE(VKBaseIE):
|
||||
IE_NAME = 'vk'
|
||||
IE_DESC = 'VK'
|
||||
_EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1']
|
||||
_EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk(?:(?:video)?\.ru|\.com)/video_ext\.php.+?)\1']
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
(?:
|
||||
(?:
|
||||
(?:(?:m|new)\.)?vk\.com/video_|
|
||||
(?:(?:m|new)\.)?vk(?:(?:video)?\.ru|\.com)/video_|
|
||||
(?:www\.)?daxab\.com/
|
||||
)
|
||||
ext\.php\?(?P<embed_query>.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+).*)|
|
||||
(?:
|
||||
(?:(?:m|new)\.)?vk\.com/(?:.+?\?.*?z=)?(?:video|clip)|
|
||||
(?:(?:m|new)\.)?vk(?:(?:video)?\.ru|\.com)/(?:.+?\?.*?z=)?(?:video|clip)|
|
||||
(?:www\.)?daxab\.com/embed/
|
||||
)
|
||||
(?P<videoid>-?\d+_\d+)(?:.*\blist=(?P<list_id>([\da-f]+)|(ln-[\da-zA-Z]+)))?
|
||||
@@ -110,7 +111,7 @@ class VKIE(VKBaseIE):
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'http://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521',
|
||||
'url': 'https://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521',
|
||||
'info_dict': {
|
||||
'id': '-77521_162222515',
|
||||
'ext': 'mp4',
|
||||
@@ -127,7 +128,7 @@ class VKIE(VKBaseIE):
|
||||
'params': {'skip_download': 'm3u8'},
|
||||
},
|
||||
{
|
||||
'url': 'http://vk.com/video205387401_165548505',
|
||||
'url': 'https://vk.com/video205387401_165548505',
|
||||
'info_dict': {
|
||||
'id': '205387401_165548505',
|
||||
'ext': 'mp4',
|
||||
@@ -182,10 +183,10 @@ class VKIE(VKBaseIE):
|
||||
'ext': 'mp4',
|
||||
'title': "DSWD Awards 'Children's Joy Foundation, Inc.' Certificate of Registration and License to Operate",
|
||||
'description': 'md5:bf9c26cfa4acdfb146362682edd3827a',
|
||||
'duration': 178,
|
||||
'duration': 179,
|
||||
'upload_date': '20130117',
|
||||
'uploader': "Children's Joy Foundation Inc.",
|
||||
'uploader_id': 'thecjf',
|
||||
'uploader_id': '@CJFIofficial',
|
||||
'view_count': int,
|
||||
'channel_id': 'UCgzCNQ11TmR9V97ECnhi3gw',
|
||||
'availability': 'public',
|
||||
@@ -193,7 +194,7 @@ class VKIE(VKBaseIE):
|
||||
'live_status': 'not_live',
|
||||
'playable_in_embed': True,
|
||||
'channel': 'Children\'s Joy Foundation Inc.',
|
||||
'uploader_url': 'http://www.youtube.com/user/thecjf',
|
||||
'uploader_url': 'https://www.youtube.com/@CJFIofficial',
|
||||
'thumbnail': r're:https?://.+\.jpg$',
|
||||
'tags': 'count:27',
|
||||
'start_time': 0.0,
|
||||
@@ -201,6 +202,7 @@ class VKIE(VKBaseIE):
|
||||
'channel_url': 'https://www.youtube.com/channel/UCgzCNQ11TmR9V97ECnhi3gw',
|
||||
'channel_follower_count': int,
|
||||
'age_limit': 0,
|
||||
'timestamp': 1358394935,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -222,6 +224,7 @@ class VKIE(VKBaseIE):
|
||||
'thumbnail': r're:https?://.+x1080$',
|
||||
'tags': list,
|
||||
},
|
||||
'skip': 'This video has been deleted and is no longer available.',
|
||||
},
|
||||
{
|
||||
'url': 'https://vk.com/clips-74006511?z=clip-74006511_456247211',
|
||||
@@ -235,13 +238,13 @@ class VKIE(VKBaseIE):
|
||||
'timestamp': 1664995597,
|
||||
'title': 'Clip by @madempress',
|
||||
'upload_date': '20221005',
|
||||
'uploader': 'Шальная императрица',
|
||||
'uploader': 'Шальная Императрица',
|
||||
'uploader_id': '-74006511',
|
||||
},
|
||||
},
|
||||
{
|
||||
# video key is extra_data not url\d+
|
||||
'url': 'http://vk.com/video-110305615_171782105',
|
||||
'url': 'https://vk.com/video-110305615_171782105',
|
||||
'md5': 'e13fcda136f99764872e739d13fac1d1',
|
||||
'info_dict': {
|
||||
'id': '-110305615_171782105',
|
||||
@@ -273,6 +276,7 @@ class VKIE(VKBaseIE):
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'No formats found',
|
||||
},
|
||||
{
|
||||
# live stream, hls and rtmp links, most likely already finished live
|
||||
@@ -312,7 +316,16 @@ class VKIE(VKBaseIE):
|
||||
{
|
||||
'url': 'https://vk.com/clip30014565_456240946',
|
||||
'only_matching': True,
|
||||
}]
|
||||
},
|
||||
{
|
||||
'url': 'https://vkvideo.ru/video-127553155_456242961',
|
||||
'only_matching': True,
|
||||
},
|
||||
{
|
||||
'url': 'https://vk.ru/video-220754053_456242564',
|
||||
'only_matching': True,
|
||||
},
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = self._match_valid_url(url)
|
||||
@@ -338,7 +351,7 @@ class VKIE(VKBaseIE):
|
||||
video_id = '{}_{}'.format(mobj.group('oid'), mobj.group('id'))
|
||||
|
||||
info_page = self._download_webpage(
|
||||
'http://vk.com/video_ext.php?' + mobj.group('embed_query'), video_id)
|
||||
'https://vk.com/video_ext.php?' + mobj.group('embed_query'), video_id)
|
||||
|
||||
error_message = self._html_search_regex(
|
||||
[r'(?s)<!><div[^>]+class="video_layer_message"[^>]*>(.+?)</div>',
|
||||
@@ -432,7 +445,7 @@ class VKIE(VKBaseIE):
|
||||
if m_opts_url:
|
||||
opts_url = m_opts_url.group(1)
|
||||
if opts_url.startswith('//'):
|
||||
opts_url = 'http:' + opts_url
|
||||
opts_url = 'https:' + opts_url
|
||||
return self.url_result(opts_url)
|
||||
|
||||
data = player['params'][0]
|
||||
@@ -512,8 +525,11 @@ class VKIE(VKBaseIE):
|
||||
class VKUserVideosIE(VKBaseIE):
|
||||
IE_NAME = 'vk:uservideos'
|
||||
IE_DESC = "VK - User's Videos"
|
||||
_VALID_URL = r'https?://(?:(?:m|new)\.)?vk\.com/video/(?:playlist/)?(?P<id>[^?$#/&]+)(?!\?.*\bz=video)(?:[/?#&](?:.*?\bsection=(?P<section>\w+))?|$)'
|
||||
_TEMPLATE_URL = 'https://vk.com/videos'
|
||||
_BASE_URL_RE = r'https?://(?:(?:m|new)\.)?vk(?:video\.ru|\.com/video)'
|
||||
_VALID_URL = [
|
||||
rf'{_BASE_URL_RE}/playlist/(?P<id>-?\d+_\d+)',
|
||||
rf'{_BASE_URL_RE}/(?P<id>@[^/?#]+)(?:/all)?/?(?!\?.*\bz=video)(?:[?#]|$)',
|
||||
]
|
||||
_TESTS = [{
|
||||
'url': 'https://vk.com/video/@mobidevices',
|
||||
'info_dict': {
|
||||
@@ -527,12 +543,20 @@ class VKUserVideosIE(VKBaseIE):
|
||||
},
|
||||
'playlist_mincount': 182,
|
||||
}, {
|
||||
'url': 'https://vk.com/video/playlist/-174476437_2',
|
||||
'url': 'https://vkvideo.ru/playlist/-204353299_426',
|
||||
'info_dict': {
|
||||
'id': '-174476437_playlist_2',
|
||||
'title': 'Анонсы',
|
||||
'id': '-204353299_playlist_426',
|
||||
},
|
||||
'playlist_mincount': 108,
|
||||
'playlist_mincount': 33,
|
||||
}, {
|
||||
'url': 'https://vk.com/video/@gorkyfilmstudio/all',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://vkvideo.ru/@mobidevices',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://vk.com/video/playlist/-174476437_2',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_VIDEO = collections.namedtuple('Video', ['owner_id', 'id'])
|
||||
|
||||
@@ -552,7 +576,7 @@ class VKUserVideosIE(VKBaseIE):
|
||||
v = self._VIDEO._make(video[:2])
|
||||
video_id = '%d_%d' % (v.owner_id, v.id)
|
||||
yield self.url_result(
|
||||
'http://vk.com/video' + video_id, VKIE.ie_key(), video_id)
|
||||
'https://vk.com/video' + video_id, VKIE.ie_key(), video_id)
|
||||
if count >= total:
|
||||
break
|
||||
video_list_json = self._download_payload('al_video', page_id, {
|
||||
@@ -561,23 +585,25 @@ class VKUserVideosIE(VKBaseIE):
|
||||
'oid': page_id,
|
||||
'section': section,
|
||||
})[0][section]
|
||||
count += video_list_json['count']
|
||||
new_count = video_list_json['count']
|
||||
if not new_count:
|
||||
self.to_screen(f'{page_id}: Skipping {total - count} unavailable videos')
|
||||
break
|
||||
count += new_count
|
||||
video_list = video_list_json['list']
|
||||
|
||||
def _real_extract(self, url):
|
||||
u_id, section = self._match_valid_url(url).groups()
|
||||
u_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, u_id)
|
||||
|
||||
if u_id.startswith('@'):
|
||||
page_id = self._search_regex(r'data-owner-id\s?=\s?"([^"]+)"', webpage, 'page_id')
|
||||
elif '_' in u_id:
|
||||
page_id, section = u_id.split('_', 1)
|
||||
section = f'playlist_{section}'
|
||||
page_id = traverse_obj(
|
||||
self._search_json(r'\bvar newCur\s*=', webpage, 'cursor data', u_id),
|
||||
('oid', {int}, {str_or_none}, {require('page id')}))
|
||||
section = traverse_obj(parse_qs(url), ('section', 0)) or 'all'
|
||||
else:
|
||||
raise ExtractorError('Invalid URL', expected=True)
|
||||
|
||||
if not section:
|
||||
section = 'all'
|
||||
page_id, _, section = u_id.partition('_')
|
||||
section = f'playlist_{section}'
|
||||
|
||||
playlist_title = clean_html(get_element_by_class('VideoInfoPanel__title', webpage))
|
||||
return self.playlist_result(self._entries(page_id, section), f'{page_id}_{section}', playlist_title)
|
||||
@@ -717,7 +743,7 @@ class VKWallPostIE(VKBaseIE):
|
||||
|
||||
|
||||
class VKPlayBaseIE(InfoExtractor):
|
||||
_BASE_URL_RE = r'https?://(?:vkplay\.live|live\.vkplay\.ru)/'
|
||||
_BASE_URL_RE = r'https?://(?:vkplay\.live|live\.vk(?:play|video)\.ru)/'
|
||||
_RESOLUTIONS = {
|
||||
'tiny': '256x144',
|
||||
'lowest': '426x240',
|
||||
@@ -797,6 +823,9 @@ class VKPlayIE(VKPlayBaseIE):
|
||||
}, {
|
||||
'url': 'https://live.vkplay.ru/lebwa/record/33a4e4ce-e3ef-49db-bb14-f006cc6fabc9/records',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://live.vkvideo.ru/lebwa/record/33a4e4ce-e3ef-49db-bb14-f006cc6fabc9/records',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@@ -839,6 +868,9 @@ class VKPlayLiveIE(VKPlayBaseIE):
|
||||
}, {
|
||||
'url': 'https://live.vkplay.ru/lebwa',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://live.vkvideo.ru/panterka',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -22,7 +22,7 @@ import urllib.parse
|
||||
from .common import InfoExtractor, SearchInfoExtractor
|
||||
from .openload import PhantomJSwrapper
|
||||
from ..jsinterp import JSInterpreter
|
||||
from ..networking.exceptions import HTTPError, TransportError, network_exceptions
|
||||
from ..networking.exceptions import HTTPError, network_exceptions
|
||||
from ..utils import (
|
||||
NO_DEFAULT,
|
||||
ExtractorError,
|
||||
@@ -50,12 +50,12 @@ from ..utils import (
|
||||
parse_iso8601,
|
||||
parse_qs,
|
||||
qualities,
|
||||
remove_end,
|
||||
remove_start,
|
||||
smuggle_url,
|
||||
str_or_none,
|
||||
str_to_int,
|
||||
strftime_or_none,
|
||||
time_seconds,
|
||||
traverse_obj,
|
||||
try_call,
|
||||
try_get,
|
||||
@@ -78,60 +78,66 @@ INNERTUBE_CLIENTS = {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20240726.00.00',
|
||||
'clientVersion': '2.20241126.01.00',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
|
||||
'REQUIRE_PO_TOKEN': True,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
# Safari UA returns pre-merged video+audio 144p/240p/360p/720p/1080p HLS formats
|
||||
'web_safari': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20240726.00.00',
|
||||
'clientVersion': '2.20241126.01.00',
|
||||
'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15,gzip(gfe)',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
|
||||
'REQUIRE_PO_TOKEN': True,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
'web_embedded': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'WEB_EMBEDDED_PLAYER',
|
||||
'clientVersion': '1.20240723.01.00',
|
||||
'clientVersion': '1.20241201.00.00',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 56,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
'web_music': {
|
||||
'INNERTUBE_HOST': 'music.youtube.com',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'WEB_REMIX',
|
||||
'clientVersion': '1.20240724.00.00',
|
||||
'clientVersion': '1.20241127.01.00',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'web_creator': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'WEB_CREATOR',
|
||||
'clientVersion': '1.20240723.03.00',
|
||||
'clientVersion': '1.20241203.01.00',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
|
||||
'REQUIRE_AUTH': True,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
'android': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID',
|
||||
'clientVersion': '19.29.37',
|
||||
'clientVersion': '19.44.38',
|
||||
'androidSdkVersion': 30,
|
||||
'userAgent': 'com.google.android.youtube/19.29.37 (Linux; U; Android 11) gzip',
|
||||
'userAgent': 'com.google.android.youtube/19.44.38 (Linux; U; Android 11) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '11',
|
||||
},
|
||||
@@ -140,13 +146,14 @@ INNERTUBE_CLIENTS = {
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_PO_TOKEN': True,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'android_music': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID_MUSIC',
|
||||
'clientVersion': '7.11.50',
|
||||
'clientVersion': '7.27.52',
|
||||
'androidSdkVersion': 30,
|
||||
'userAgent': 'com.google.android.apps.youtube.music/7.11.50 (Linux; U; Android 11) gzip',
|
||||
'userAgent': 'com.google.android.apps.youtube.music/7.27.52 (Linux; U; Android 11) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '11',
|
||||
},
|
||||
@@ -154,15 +161,17 @@ INNERTUBE_CLIENTS = {
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_PO_TOKEN': True,
|
||||
'REQUIRE_AUTH': True,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'android_creator': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID_CREATOR',
|
||||
'clientVersion': '24.30.100',
|
||||
'clientVersion': '24.45.100',
|
||||
'androidSdkVersion': 30,
|
||||
'userAgent': 'com.google.android.apps.youtube.creator/24.30.100 (Linux; U; Android 11) gzip',
|
||||
'userAgent': 'com.google.android.apps.youtube.creator/24.45.100 (Linux; U; Android 11) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '11',
|
||||
},
|
||||
@@ -170,38 +179,25 @@ INNERTUBE_CLIENTS = {
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_PO_TOKEN': True,
|
||||
'REQUIRE_AUTH': True,
|
||||
},
|
||||
# YouTube Kids videos aren't returned on this client for some reason
|
||||
'android_vr': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID_VR',
|
||||
'clientVersion': '1.57.29',
|
||||
'clientVersion': '1.60.19',
|
||||
'deviceMake': 'Oculus',
|
||||
'deviceModel': 'Quest 3',
|
||||
'androidSdkVersion': 32,
|
||||
'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.57.29 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
|
||||
'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.60.19 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '12L',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 28,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
'android_testsuite': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID_TESTSUITE',
|
||||
'clientVersion': '1.9',
|
||||
'androidSdkVersion': 30,
|
||||
'userAgent': 'com.google.android.youtube/1.9 (Linux; U; Android 11) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '11',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 30,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'PLAYER_PARAMS': '2AMB',
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
# iOS clients have HLS live streams. Setting device model to get 60fps formats.
|
||||
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
|
||||
@@ -209,47 +205,51 @@ INNERTUBE_CLIENTS = {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'IOS',
|
||||
'clientVersion': '19.29.1',
|
||||
'clientVersion': '19.45.4',
|
||||
'deviceMake': 'Apple',
|
||||
'deviceModel': 'iPhone16,2',
|
||||
'userAgent': 'com.google.ios.youtube/19.29.1 (iPhone16,2; U; CPU iOS 17_5_1 like Mac OS X;)',
|
||||
'userAgent': 'com.google.ios.youtube/19.45.4 (iPhone16,2; U; CPU iOS 18_1_0 like Mac OS X;)',
|
||||
'osName': 'iPhone',
|
||||
'osVersion': '17.5.1.21F90',
|
||||
'osVersion': '18.1.0.22B83',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'ios_music': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'IOS_MUSIC',
|
||||
'clientVersion': '7.08.2',
|
||||
'clientVersion': '7.27.0',
|
||||
'deviceMake': 'Apple',
|
||||
'deviceModel': 'iPhone16,2',
|
||||
'userAgent': 'com.google.ios.youtubemusic/7.08.2 (iPhone16,2; U; CPU iOS 17_5_1 like Mac OS X;)',
|
||||
'userAgent': 'com.google.ios.youtubemusic/7.27.0 (iPhone16,2; U; CPU iOS 18_1_0 like Mac OS X;)',
|
||||
'osName': 'iPhone',
|
||||
'osVersion': '17.5.1.21F90',
|
||||
'osVersion': '18.1.0.22B83',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_AUTH': True,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
'ios_creator': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'IOS_CREATOR',
|
||||
'clientVersion': '24.30.100',
|
||||
'clientVersion': '24.45.100',
|
||||
'deviceMake': 'Apple',
|
||||
'deviceModel': 'iPhone16,2',
|
||||
'userAgent': 'com.google.ios.ytcreator/24.30.100 (iPhone16,2; U; CPU iOS 17_5_1 like Mac OS X;)',
|
||||
'userAgent': 'com.google.ios.ytcreator/24.45.100 (iPhone16,2; U; CPU iOS 18_1_0 like Mac OS X;)',
|
||||
'osName': 'iPhone',
|
||||
'osVersion': '17.5.1.21F90',
|
||||
'osVersion': '18.1.0.22B83',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_AUTH': True,
|
||||
},
|
||||
# mweb has 'ultralow' formats
|
||||
# See: https://github.com/yt-dlp/yt-dlp/pull/557
|
||||
@@ -257,19 +257,22 @@ INNERTUBE_CLIENTS = {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'MWEB',
|
||||
'clientVersion': '2.20240726.01.00',
|
||||
'clientVersion': '2.20241202.07.00',
|
||||
'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 2,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
'tv': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'TVHTML5',
|
||||
'clientVersion': '7.20240724.13.00',
|
||||
'clientVersion': '7.20241201.18.00',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
# This client now requires sign-in for every video
|
||||
# It was previously an age-gate workaround for videos that were `playable_in_embed`
|
||||
@@ -282,17 +285,8 @@ INNERTUBE_CLIENTS = {
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
|
||||
},
|
||||
# This client has pre-merged video+audio 720p/1080p streams
|
||||
'mediaconnect': {
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'MEDIA_CONNECT_FRONTEND',
|
||||
'clientVersion': '0.1',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 95,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
'REQUIRE_AUTH': True,
|
||||
'SUPPORTS_COOKIES': True,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -321,6 +315,8 @@ def build_innertube_clients():
|
||||
ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
|
||||
ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
|
||||
ytcfg.setdefault('REQUIRE_PO_TOKEN', False)
|
||||
ytcfg.setdefault('REQUIRE_AUTH', False)
|
||||
ytcfg.setdefault('SUPPORTS_COOKIES', False)
|
||||
ytcfg.setdefault('PLAYER_PARAMS', None)
|
||||
ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
|
||||
|
||||
@@ -577,208 +573,18 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
|
||||
self._check_login_required()
|
||||
|
||||
def _perform_login(self, username, password):
|
||||
auth_type, _, user = (username or '').partition('+')
|
||||
|
||||
if auth_type != 'oauth':
|
||||
raise ExtractorError(self._youtube_login_hint, expected=True)
|
||||
|
||||
self._initialize_oauth(user, password)
|
||||
|
||||
'''
|
||||
OAuth 2.0 Device Authorization Grant flow, used by the YouTube TV client (youtube.com/tv).
|
||||
|
||||
For more information regarding OAuth 2.0 and the Device Authorization Grant flow in general, see:
|
||||
- https://developers.google.com/identity/protocols/oauth2/limited-input-device
|
||||
- https://accounts.google.com/.well-known/openid-configuration
|
||||
- https://www.rfc-editor.org/rfc/rfc8628
|
||||
- https://www.rfc-editor.org/rfc/rfc6749
|
||||
|
||||
Note: The official client appears to use a proxied version of the oauth2 endpoints on youtube.com/o/oauth2,
|
||||
which applies some modifications to the response (such as returning errors as 200 OK).
|
||||
Since the client works with the standard API, we will use that as it is well-documented.
|
||||
'''
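# Illustrative sketch (not part of the diff): a bare-bones RFC 8628 device-authorization
# flow against the Google endpoints referenced below, using only the standard library.
# client_id/client_secret/scope are placeholders supplied by the caller, not real values.
import json
import time
import urllib.error
import urllib.request

DEVICE_CODE_URL = 'https://oauth2.googleapis.com/device/code'
TOKEN_URL = 'https://oauth2.googleapis.com/token'

def _post_json(url, payload):
    req = urllib.request.Request(
        url, data=json.dumps(payload).encode(),
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

def device_flow(client_id, client_secret, scope):
    code = _post_json(DEVICE_CODE_URL, {'client_id': client_id, 'scope': scope})
    print(f'Visit {code["verification_url"]} and enter code {code["user_code"]}')
    interval = code.get('interval', 5)  # RFC 8628 § 3.5: default poll interval is 5s
    while True:
        time.sleep(interval)
        try:
            return _post_json(TOKEN_URL, {
                'client_id': client_id,
                'client_secret': client_secret,
                'device_code': code['device_code'],
                'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
            })
        except urllib.error.HTTPError as e:
            error = json.load(e).get('error')
            if error == 'authorization_pending':
                continue        # user has not approved yet; keep polling
            if error == 'slow_down':
                interval += 5   # RFC 8628 § 3.5: back off by 5 seconds
                continue
            raise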
|
||||
|
||||
_OAUTH_PROFILE = None
|
||||
_OAUTH_ACCESS_TOKEN_CACHE = {}
|
||||
_OAUTH_DISPLAY_ID = 'oauth'
|
||||
|
||||
# YouTube TV (TVHTML5) client. You can find these at youtube.com/tv
|
||||
_OAUTH_CLIENT_ID = '861556708454-d6dlm3lh05idd8npek18k6be8ba3oc68.apps.googleusercontent.com'
|
||||
_OAUTH_CLIENT_SECRET = 'SboVhoG9s0rNafixCSGGKXAT'
|
||||
_OAUTH_SCOPE = 'http://gdata.youtube.com https://www.googleapis.com/auth/youtube-paid-content'
|
||||
|
||||
# From https://accounts.google.com/.well-known/openid-configuration
|
||||
# Technically, these should be fetched dynamically and not hard-coded.
|
||||
# However, as these endpoints rarely change, we can risk saving an extra request for every invocation.
|
||||
_OAUTH_DEVICE_AUTHORIZATION_ENDPOINT = 'https://oauth2.googleapis.com/device/code'
|
||||
_OAUTH_TOKEN_ENDPOINT = 'https://oauth2.googleapis.com/token'
|
||||
|
||||
@property
|
||||
def _oauth_cache_key(self):
|
||||
return f'oauth_refresh_token_{self._OAUTH_PROFILE}'
|
||||
|
||||
def _read_oauth_error_response(self, response):
|
||||
return traverse_obj(
|
||||
self._webpage_read_content(response, self._OAUTH_TOKEN_ENDPOINT, self._OAUTH_DISPLAY_ID, fatal=False),
|
||||
({json.loads}, 'error', {str}))
|
||||
|
||||
def _set_oauth_info(self, token_response):
|
||||
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE.setdefault(self._OAUTH_PROFILE, {}).update({
|
||||
'access_token': token_response['access_token'],
|
||||
'token_type': token_response['token_type'],
|
||||
'expiry': time_seconds(
|
||||
seconds=traverse_obj(token_response, ('expires_in', {float_or_none}), default=300) - 10),
|
||||
})
|
||||
refresh_token = traverse_obj(token_response, ('refresh_token', {str}))
|
||||
if refresh_token:
|
||||
self.cache.store(self._NETRC_MACHINE, self._oauth_cache_key, refresh_token)
|
||||
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE[self._OAUTH_PROFILE]['refresh_token'] = refresh_token
|
||||
|
||||
def _initialize_oauth(self, user, refresh_token):
|
||||
self._OAUTH_PROFILE = user or 'default'
|
||||
|
||||
if self._OAUTH_PROFILE in YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE:
|
||||
self.write_debug(f'{self._OAUTH_DISPLAY_ID}: Using cached access token for profile "{self._OAUTH_PROFILE}"')
|
||||
return
|
||||
|
||||
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE[self._OAUTH_PROFILE] = {}
|
||||
|
||||
if refresh_token:
|
||||
msg = f'{self._OAUTH_DISPLAY_ID}: Using password input as refresh token'
|
||||
if self.get_param('cachedir') is not False:
|
||||
msg += ' and caching token to disk; you should supply an empty password next time'
|
||||
self.to_screen(msg)
|
||||
self.cache.store(self._NETRC_MACHINE, self._oauth_cache_key, refresh_token)
|
||||
else:
|
||||
refresh_token = self.cache.load(self._NETRC_MACHINE, self._oauth_cache_key)
|
||||
|
||||
if refresh_token:
|
||||
YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE[self._OAUTH_PROFILE]['refresh_token'] = refresh_token
|
||||
try:
|
||||
token_response = self._refresh_token(refresh_token)
|
||||
except ExtractorError as e:
|
||||
error_msg = str(e.orig_msg).replace('Failed to refresh access token: ', '')
|
||||
self.report_warning(f'{self._OAUTH_DISPLAY_ID}: Failed to refresh access token: {error_msg}')
|
||||
token_response = self._oauth_authorize
|
||||
else:
|
||||
token_response = self._oauth_authorize
|
||||
|
||||
self._set_oauth_info(token_response)
|
||||
self.write_debug(f'{self._OAUTH_DISPLAY_ID}: Logged in using profile "{self._OAUTH_PROFILE}"')
|
||||
|
||||
def _refresh_token(self, refresh_token):
|
||||
try:
|
||||
token_response = self._download_json(
|
||||
self._OAUTH_TOKEN_ENDPOINT,
|
||||
video_id=self._OAUTH_DISPLAY_ID,
|
||||
note='Refreshing access token',
|
||||
data=json.dumps({
|
||||
'client_id': self._OAUTH_CLIENT_ID,
|
||||
'client_secret': self._OAUTH_CLIENT_SECRET,
|
||||
'refresh_token': refresh_token,
|
||||
'grant_type': 'refresh_token',
|
||||
}).encode(),
|
||||
headers={'Content-Type': 'application/json'})
|
||||
except ExtractorError as e:
|
||||
if isinstance(e.cause, HTTPError):
|
||||
error = self._read_oauth_error_response(e.cause.response)
|
||||
if error == 'invalid_grant':
|
||||
# RFC6749 § 5.2
|
||||
raise ExtractorError(
|
||||
'Failed to refresh access token: Refresh token is invalid, revoked, or expired (invalid_grant)',
|
||||
expected=True, video_id=self._OAUTH_DISPLAY_ID)
|
||||
raise ExtractorError(
|
||||
f'Failed to refresh access token: Authorization server returned error {error}',
|
||||
video_id=self._OAUTH_DISPLAY_ID)
|
||||
raise
|
||||
return token_response
|
||||
|
||||
@property
|
||||
def _oauth_authorize(self):
|
||||
code_response = self._download_json(
|
||||
self._OAUTH_DEVICE_AUTHORIZATION_ENDPOINT,
|
||||
video_id=self._OAUTH_DISPLAY_ID,
|
||||
note='Initializing authorization flow',
|
||||
data=json.dumps({
|
||||
'client_id': self._OAUTH_CLIENT_ID,
|
||||
'scope': self._OAUTH_SCOPE,
|
||||
}).encode(),
|
||||
headers={'Content-Type': 'application/json'})
|
||||
|
||||
verification_url = traverse_obj(code_response, ('verification_url', {str}))
|
||||
user_code = traverse_obj(code_response, ('user_code', {str}))
|
||||
if not verification_url or not user_code:
|
||||
if username.startswith('oauth'):
|
||||
raise ExtractorError(
|
||||
'Authorization server did not provide verification_url or user_code', video_id=self._OAUTH_DISPLAY_ID)
|
||||
f'Login with OAuth is no longer supported. {self._youtube_login_hint}', expected=True)
|
||||
|
||||
# note: The whitespace is intentional
|
||||
self.to_screen(
|
||||
f'{self._OAUTH_DISPLAY_ID}: To give yt-dlp access to your account, '
|
||||
f'go to {verification_url} and enter code {user_code}')
|
||||
|
||||
# RFC8628 § 3.5: default poll interval is 5 seconds if not provided
|
||||
poll_interval = traverse_obj(code_response, ('interval', {int}), default=5)
|
||||
|
||||
for retry in self.RetryManager():
|
||||
while True:
|
||||
try:
|
||||
token_response = self._download_json(
|
||||
self._OAUTH_TOKEN_ENDPOINT,
|
||||
video_id=self._OAUTH_DISPLAY_ID,
|
||||
note=False,
|
||||
errnote='Failed to request access token',
|
||||
data=json.dumps({
|
||||
'client_id': self._OAUTH_CLIENT_ID,
|
||||
'client_secret': self._OAUTH_CLIENT_SECRET,
|
||||
'device_code': code_response['device_code'],
|
||||
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
|
||||
}).encode(),
|
||||
headers={'Content-Type': 'application/json'})
|
||||
except ExtractorError as e:
|
||||
if isinstance(e.cause, TransportError):
|
||||
retry.error = e
|
||||
break
|
||||
elif isinstance(e.cause, HTTPError):
|
||||
error = self._read_oauth_error_response(e.cause.response)
|
||||
if not error:
|
||||
retry.error = e
|
||||
break
|
||||
|
||||
if error == 'authorization_pending':
|
||||
time.sleep(poll_interval)
|
||||
continue
|
||||
elif error == 'expired_token':
|
||||
raise ExtractorError(
|
||||
'Authorization timed out', expected=True, video_id=self._OAUTH_DISPLAY_ID)
|
||||
elif error == 'access_denied':
|
||||
raise ExtractorError(
|
||||
'You denied access to an account', expected=True, video_id=self._OAUTH_DISPLAY_ID)
|
||||
elif error == 'slow_down':
|
||||
# RFC8628 § 3.5: add 5 seconds to the poll interval
|
||||
poll_interval += 5
|
||||
time.sleep(poll_interval)
|
||||
continue
|
||||
else:
|
||||
raise ExtractorError(
|
||||
f'Authorization server returned an error when fetching access token: {error}',
|
||||
video_id=self._OAUTH_DISPLAY_ID)
|
||||
raise
|
||||
|
||||
return token_response
|
||||
|
||||
def _update_oauth(self):
|
||||
token = YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE.get(self._OAUTH_PROFILE)
|
||||
if token is None or token['expiry'] > time.time():
|
||||
return
|
||||
|
||||
self._set_oauth_info(self._refresh_token(token['refresh_token']))
|
||||
self.report_warning(
|
||||
f'Login with password is not supported for YouTube. {self._youtube_login_hint}')
|
||||
|
||||
@property
|
||||
def _youtube_login_hint(self):
|
||||
return ('Use --username=oauth[+PROFILE] --password="" to log in using oauth, '
|
||||
f'or else u{self._login_hint(method="cookies")[1:]}. '
|
||||
'See https://github.com/yt-dlp/yt-dlp/wiki/Extractors#logging-in-with-oauth for more on how to use oauth. '
|
||||
'See https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for help with cookies')
|
||||
return (f'{self._login_hint(method="cookies")}. Also see '
|
||||
'https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies '
|
||||
'for tips on effectively exporting YouTube cookies')
|
||||
|
||||
def _check_login_required(self):
|
||||
if self._LOGIN_REQUIRED and not self.is_authenticated:
|
||||
@@ -928,7 +734,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
|
||||
|
||||
@functools.cached_property
|
||||
def is_authenticated(self):
|
||||
return self._OAUTH_PROFILE or bool(self._generate_sapisidhash_header())
|
||||
return bool(self._generate_sapisidhash_header())

def extract_ytcfg(self, video_id, webpage):
if not webpage:
@@ -938,16 +744,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}

def _generate_oauth_headers(self):
self._update_oauth()
oauth_token = YoutubeBaseInfoExtractor._OAUTH_ACCESS_TOKEN_CACHE.get(self._OAUTH_PROFILE)
if not oauth_token:
return {}

return {
'Authorization': f'{oauth_token["token_type"]} {oauth_token["access_token"]}',
}

def _generate_cookie_auth_headers(self, *, ytcfg=None, account_syncid=None, session_index=None, origin=None, **kwargs):
headers = {}
account_syncid = account_syncid or self._extract_account_syncid(ytcfg)
@@ -977,14 +773,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'Origin': origin,
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg),
'User-Agent': self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT']['client']['userAgent'], default_client=default_client),
**self._generate_oauth_headers(),
**self._generate_cookie_auth_headers(ytcfg=ytcfg, account_syncid=account_syncid, session_index=session_index, origin=origin),
}
return filter_dict(headers)

def _generate_webpage_headers(self):
return self._generate_oauth_headers()

def _download_ytcfg(self, client, video_id):
url = {
'web': 'https://www.youtube.com',
@@ -994,8 +786,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if not url:
return {}
webpage = self._download_webpage(
url, video_id, fatal=False, note=f'Downloading {client.replace("_", " ").strip()} client config',
headers=self._generate_webpage_headers())
url, video_id, fatal=False, note=f'Downloading {client.replace("_", " ").strip()} client config')
return self.extract_ytcfg(video_id, webpage) or {}

@staticmethod
@@ -1566,6 +1357,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_DEFAULT_CLIENTS = ('ios', 'mweb')
_DEFAULT_AUTHED_CLIENTS = ('web_creator', 'mweb')

_GEO_BYPASS = False

@@ -3139,7 +2931,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# Obtain from MPD's maximum seq value
old_mpd_url = mpd_url
last_error = ctx.pop('last_error', None)
expire_fast = immediate or last_error and isinstance(last_error, HTTPError) and last_error.status == 403
expire_fast = immediate or (last_error and isinstance(last_error, HTTPError) and last_error.status == 403)
mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
or (mpd_url, stream_number, False))
if not refresh_sequence:
@@ -3289,8 +3081,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote=f'Download of {player_url} failed',
headers=self._generate_webpage_headers())
errnote=f'Download of {player_url} failed')
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
@@ -3357,19 +3148,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self.to_screen('Extracted signature function:\n' + code)

def _parse_sig_js(self, jscode):
# Examples where `sig` is funcname:
# sig=function(a){a=a.split(""); ... ;return a.join("")};
# ;c&&(c=sig(decodeURIComponent(c)),a.set(b,encodeURIComponent(c)));return a};
# {var l=f,m=h.sp,n=sig(decodeURIComponent(h.s));l.set(m,encodeURIComponent(n))}
# sig=function(J){J=J.split(""); ... ;return J.join("")};
# ;N&&(N=sig(decodeURIComponent(N)),J.set(R,encodeURIComponent(N)));return J};
# {var H=u,k=f.sp,v=sig(decodeURIComponent(f.s));H.set(k,encodeURIComponent(v))}
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
(r'\b(?P<var>[a-zA-Z0-9$]+)&&\((?P=var)=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\((?P=var)\)\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*(?P<arg>[a-zA-Z0-9$]+)\s*\)\s*{\s*(?P=arg)\s*=\s*(?P=arg)\.split\(\s*""\s*\)\s*;\s*[^}]+;\s*return\s+(?P=arg)\.join\(\s*""\s*\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
# Old patterns
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
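To illustrate, the first of the new patterns above can be exercised against one of the sample player snippets from the comment block; the function name 'lha' is invented:

import re

jscode = ';N&&(N=lha(decodeURIComponent(N)),J.set(R,encodeURIComponent(N)));return J};'
mobj = re.search(
    r'\b(?P<var>[a-zA-Z0-9$]+)&&\((?P=var)=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\((?P=var)\)\)',
    jscode)
print(mobj.group('sig'))  # -> 'lha', the candidate signature function name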

@@ -3443,6 +3241,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# * a.D&&(b="nn"[+a.D],c=a.get(b))&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
# * a.D&&(PL(a),b=a.j.n||null)&&(b=narray[0](b),a.set("n",b),narray.length||nfunc("")
# * a.D&&(b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
# * J.J="";J.url="";J.Z&&(R="nn"[+J.Z],mW(J),N=J.K[R]||null)&&(N=narray[idx](N),J.set(R,N))}};
funcname, idx = self._search_regex(
r'''(?x)
(?:
@@ -3459,7 +3258,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
)\)&&\(c=|
\b(?P<var>[a-zA-Z0-9_$]+)=
)(?P<nfunc>[a-zA-Z0-9_$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z]\)
(?(var),[a-zA-Z0-9_$]+\.set\("n"\,(?P=var)\),(?P=nfunc)\.length)''',
(?(var),[a-zA-Z0-9_$]+\.set\((?:"n+"|[a-zA-Z0-9_$]+)\,(?P=var)\))''',
jscode, 'n function name', group=('nfunc', 'idx'), default=(None, None))
if not funcname:
self.report_warning(join_nonempty(
@@ -3468,7 +3267,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return self._search_regex(
r'''(?xs)
;\s*(?P<name>[a-zA-Z0-9_$]+)\s*=\s*function\([a-zA-Z0-9_$]+\)
\s*\{(?:(?!};).)+?["']enhanced_except_''',
\s*\{(?:(?!};).)+?return\s*(?P<q>["'])[\w-]+_w8_(?P=q)\s*\+\s*[a-zA-Z0-9_$]+''',
jscode, 'Initial JS player n function name', group='name')
elif not idx:
return funcname
@@ -3477,6 +3276,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
rf'var {re.escape(funcname)}\s*=\s*(\[.+?\])\s*[,;]', jscode,
f'Initial JS player n function list ({funcname}.{idx})')))[int(idx)]
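When an index is captured, the name resolves through a lookup array in the player source. A simplified standalone sketch of that resolution (the extractor itself parses the matched array with its own helpers; 'Wma' and 'Xqa' are invented identifiers):

import re

jscode = 'var Wma=[null,Xqa];'  # invented player excerpt
funcname, idx = 'Wma', '1'
array_src = re.search(
    rf'var {re.escape(funcname)}\s*=\s*(\[.+?\])\s*[,;]', jscode).group(1)
names = [name.strip() for name in array_src.strip('[]').split(',')]
print(names[int(idx)])  # -> 'Xqa', the concrete n-transform function name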

def _fixup_n_function_code(self, argnames, code):
return argnames, re.sub(
rf';\s*if\s*\(\s*typeof\s+[a-zA-Z0-9_$]+\s*===?\s*(["\'])undefined\1\s*\)\s*return\s+{argnames[0]};',
';', code)
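The new _fixup_n_function_code helper strips an early typeof guard that would otherwise make the interpreted n-function return its input unchanged. A minimal standalone sketch, with an invented function body:

import re

def fixup_n_function_code(argnames, code):
    # Drop `if (typeof ... === "undefined") return <first arg>;` from the body
    return argnames, re.sub(
        rf';\s*if\s*\(\s*typeof\s+[a-zA-Z0-9_$]+\s*===?\s*(["\'])undefined\1\s*\)\s*return\s+{argnames[0]};',
        ';', code)

_, code = fixup_n_function_code(
    ['a'], 'function(a){var b=a.split("");if(typeof dQw==="undefined")return a;b.reverse();return b.join("")}')
print(code)  # guard removed; the real transformation now runs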

def _extract_n_function_code(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
func_code = self.cache.load('youtube-nsig', player_id, min_ver='2024.07.09')
@@ -3488,7 +3292,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

func_name = self._extract_n_function_name(jscode, player_url=player_url)

func_code = jsi.extract_function_code(func_name)
# XXX: Workaround for the `typeof` gotcha
func_code = self._fixup_n_function_code(*jsi.extract_function_code(func_name))

self.cache.store('youtube-nsig', player_id, func_code)
return jsi, player_id, func_code
@@ -3504,7 +3309,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
except Exception as e:
raise JSInterpreter.Exception(traceback.format_exc(), cause=e)

if ret.startswith('enhanced_except_'):
if ret.startswith('enhanced_except_') or ret.endswith(s):
raise JSInterpreter.Exception('Signature function returned an exception')
return ret

@@ -3573,8 +3378,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

self._download_webpage(
url, video_id, f'Marking {label}watched',
'Unable to mark watched', fatal=False,
headers=self._generate_webpage_headers())
'Unable to mark watched', fatal=False)

@classmethod
def _extract_from_webpage(cls, url, webpage):
@@ -4063,12 +3867,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _get_requested_clients(self, url, smuggled_data):
requested_clients = []
excluded_clients = []
default_clients = self._DEFAULT_AUTHED_CLIENTS if self.is_authenticated else self._DEFAULT_CLIENTS
allowed_clients = sorted(
(client for client in INNERTUBE_CLIENTS if client[:1] != '_'),
key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
for client in self._configuration_arg('player_client'):
if client == 'default':
requested_clients.extend(self._DEFAULT_CLIENTS)
requested_clients.extend(default_clients)
elif client == 'all':
requested_clients.extend(allowed_clients)
elif client.startswith('-'):
@@ -4078,7 +3883,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
requested_clients.append(client)
if not requested_clients:
requested_clients.extend(self._DEFAULT_CLIENTS)
requested_clients.extend(default_clients)
for excluded_client in excluded_clients:
if excluded_client in requested_clients:
requested_clients.remove(excluded_client)
@@ -4088,9 +3893,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if smuggled_data.get('is_music_url') or self.is_music_url(url):
for requested_client in requested_clients:
_, base_client, variant = _split_innertube_client(requested_client)
music_client = f'{base_client}_music'
music_client = f'{base_client}_music' if base_client != 'mweb' else 'web_music'
if variant != 'music' and music_client in INNERTUBE_CLIENTS:
requested_clients.append(music_client)
client_info = INNERTUBE_CLIENTS[music_client]
if not client_info['REQUIRE_AUTH'] or (self.is_authenticated and client_info['SUPPORTS_COOKIES']):
requested_clients.append(music_client)

if self.is_authenticated:
unsupported_clients = [
client for client in requested_clients if not INNERTUBE_CLIENTS[client]['SUPPORTS_COOKIES']
]
for client in unsupported_clients:
self.report_warning(f'Skipping client "{client}" since it does not support cookies', only_once=True)
requested_clients.remove(client)

return orderedSet(requested_clients)
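A hedged sketch of the music-client fallback above, using an invented two-flag stand-in for INNERTUBE_CLIENTS (the real table and its REQUIRE_AUTH/SUPPORTS_COOKIES entries live elsewhere in this module; the variant check is omitted for brevity):

INNERTUBE_CLIENTS = {  # illustrative subset with made-up flag values
    'ios': {'REQUIRE_AUTH': False, 'SUPPORTS_COOKIES': False},
    'ios_music': {'REQUIRE_AUTH': False, 'SUPPORTS_COOKIES': False},
    'mweb': {'REQUIRE_AUTH': False, 'SUPPORTS_COOKIES': True},
    'web_music': {'REQUIRE_AUTH': False, 'SUPPORTS_COOKIES': True},
}

is_authenticated = False
requested_clients = ['ios', 'mweb']
for base_client in list(requested_clients):
    music_client = f'{base_client}_music' if base_client != 'mweb' else 'web_music'
    client_info = INNERTUBE_CLIENTS.get(music_client)
    if client_info and (not client_info['REQUIRE_AUTH']
                        or (is_authenticated and client_info['SUPPORTS_COOKIES'])):
        requested_clients.append(music_client)
print(requested_clients)  # -> ['ios', 'mweb', 'ios_music', 'web_music']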

@@ -4197,16 +4012,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
prs.append(pr)

''' This code is pointless while web_creator is in _DEFAULT_AUTHED_CLIENTS
# EU countries require age-verification for accounts to access age-restricted videos
# If account is not age-verified, _is_agegated() will be truthy for non-embedded clients
if self.is_authenticated and self._is_agegated(pr):
self.to_screen(
f'{video_id}: This video is age-restricted and YouTube is requiring '
'account age-verification; some formats may be missing', only_once=True)
# web_creator and mediaconnect can work around the age-verification requirement
# _testsuite & _vr variants can also work around age-verification
# web_creator can work around the age-verification requirement
# android_vr may also be able to work around age-verification
# tv_embedded may(?) still work around age-verification if the video is embeddable
append_client('web_creator', 'mediaconnect')
append_client('web_creator')
'''

prs.extend(deprioritized_prs)

@@ -4222,8 +4039,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return prs, player_url

def _needs_live_processing(self, live_status, duration):
if (live_status == 'is_live' and self.get_param('live_from_start')
or live_status == 'post_live' and (duration or 0) > 2 * 3600):
if ((live_status == 'is_live' and self.get_param('live_from_start'))
or (live_status == 'post_live' and (duration or 0) > 2 * 3600)):
return live_status

def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration):
@@ -4422,7 +4239,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
skip_manifests = set(self._configuration_arg('skip'))
if (not self.get_param('youtube_include_hls_manifest', True)
or needs_live_processing == 'is_live' # These will be filtered out by YoutubeDL anyway
or needs_live_processing and skip_bad_formats):
or (needs_live_processing and skip_bad_formats)):
skip_manifests.add('hls')

if not self.get_param('youtube_include_dash_manifest', True):
@@ -4558,7 +4375,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if pp:
query['pp'] = pp
webpage = self._download_webpage(
webpage_url, video_id, fatal=False, query=query, headers=self._generate_webpage_headers())
webpage_url, video_id, fatal=False, query=query)

master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()

@@ -4620,14 +4437,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
expected_type=dict)

translated_title = self._get_text(microformats, (..., 'title'))
video_title = (self._preferred_lang and translated_title
video_title = ((self._preferred_lang and translated_title)
or get_first(video_details, 'title') # primary
or translated_title
or search_meta(['og:title', 'twitter:title', 'title']))
translated_description = self._get_text(microformats, (..., 'description'))
original_description = get_first(video_details, 'shortDescription')
video_description = (
self._preferred_lang and translated_description
(self._preferred_lang and translated_description)
# If original description is blank, it will be an empty string.
# Do not prefer translated description in this case.
or original_description if original_description is not None else translated_description)
@@ -4701,6 +4518,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += f'. {subreason}'
if reason:
if 'sign in' in reason.lower():
reason = remove_end(reason, 'This helps protect our community. Learn more')
reason = f'{remove_end(reason.strip(), ".")}. {self._youtube_login_hint}'
self.raise_no_formats(reason, expected=True)

keywords = get_first(video_details, 'keywords', expected_type=list) or []
@@ -5225,6 +5045,10 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
if lockup_view_model := traverse_obj(item, ('lockupViewModel', {dict})):
if entry := self._extract_lockup_view_model(lockup_view_model):
yield entry
continue
renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
@@ -5323,7 +5147,27 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
continue
yield self._extract_video(renderer)

def _extract_lockup_view_model(self, view_model):
content_id = view_model.get('contentId')
if not content_id:
return
content_type = view_model.get('contentType')
if content_type not in ('LOCKUP_CONTENT_TYPE_PLAYLIST', 'LOCKUP_CONTENT_TYPE_PODCAST'):
self.report_warning(
f'Unsupported lockup view model content type "{content_type}"{bug_reports_message()}', only_once=True)
return
return self.url_result(
f'https://www.youtube.com/playlist?list={content_id}', ie=YoutubeTabIE, video_id=content_id,
title=traverse_obj(view_model, (
'metadata', 'lockupMetadataViewModel', 'title', 'content', {str})),
thumbnails=self._extract_thumbnails(view_model, (
'contentImage', 'collectionThumbnailViewModel', 'primaryThumbnail', 'thumbnailViewModel', 'image'), final_key='sources'))
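The payload shape that _extract_lockup_view_model consumes can be sketched with an abridged, hypothetical example; the key paths mirror the lookups above, and the playlist ID is made up:

lockup_view_model = {  # abridged, hypothetical lockupViewModel payload
    'contentId': 'PL0123456789abcdefABCDEF0123456789',
    'contentType': 'LOCKUP_CONTENT_TYPE_PLAYLIST',
    'metadata': {'lockupMetadataViewModel': {'title': {'content': 'Example playlist'}}},
}

content_id = lockup_view_model.get('contentId')
title = (lockup_view_model.get('metadata', {}).get('lockupMetadataViewModel', {})
         .get('title', {}).get('content'))
print(f'https://www.youtube.com/playlist?list={content_id}', '->', title)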

def _rich_entries(self, rich_grid_renderer):
if lockup_view_model := traverse_obj(rich_grid_renderer, ('content', 'lockupViewModel', {dict})):
if entry := self._extract_lockup_view_model(lockup_view_model):
yield entry
return
renderer = traverse_obj(
rich_grid_renderer,
('content', ('videoRenderer', 'reelItemRenderer', 'playlistRenderer', 'shortsLockupViewModel'), any)) or {}
@@ -5846,7 +5690,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
webpage, data = None, None
for retry in self.RetryManager(fatal=fatal):
try:
webpage = self._download_webpage(url, item_id, note='Downloading webpage', headers=self._generate_webpage_headers())
webpage = self._download_webpage(url, item_id, note='Downloading webpage')
data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
@@ -6021,7 +5865,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:4d1da95432004b7ba840ebc895b6b4c9',
'description': 'md5:602e3789e6a0cb7d9d352186b720e395',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
@@ -6945,22 +6789,22 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
},
'playlist_count': 0,
}, {
# Podcasts tab, with rich entry playlistRenderers
# Podcasts tab, with rich entry lockupViewModel
'url': 'https://www.youtube.com/@99percentinvisiblepodcast/podcasts',
'info_dict': {
'id': 'UCVMF2HD4ZgC0QHpU9Yq5Xrw',
'channel_id': 'UCVMF2HD4ZgC0QHpU9Yq5Xrw',
'uploader_url': 'https://www.youtube.com/@99percentinvisiblepodcast',
'description': 'md5:3a0ed38f1ad42a68ef0428c04a15695c',
'title': '99 Percent Invisible - Podcasts',
'uploader': '99 Percent Invisible',
'title': '99% Invisible - Podcasts',
'uploader': '99% Invisible',
'channel_follower_count': int,
'channel_url': 'https://www.youtube.com/channel/UCVMF2HD4ZgC0QHpU9Yq5Xrw',
'tags': [],
'channel': '99 Percent Invisible',
'channel': '99% Invisible',
'uploader_id': '@99percentinvisiblepodcast',
},
'playlist_count': 0,
'playlist_count': 5,
}, {
# Releases tab, with rich entry playlistRenderers (same as Podcasts tab)
'url': 'https://www.youtube.com/@AHimitsu/releases',
@@ -7040,7 +6884,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
tab_url = urljoin(base_url, traverse_obj(
tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))

tab_id = (tab_url and self._get_url_mobj(tab_url)['tab'][1:]
tab_id = ((tab_url and self._get_url_mobj(tab_url)['tab'][1:])
or traverse_obj(tab, 'tabIdentifier', expected_type=str))
if tab_id:
return {