
[cleanup] Misc (#5044)

Authored by: gamer191, pukkandan
gamer191 authored 2022-10-04 15:23:11 +11:00, committed by GitHub
parent 878eac3e2e
commit 304ad45a9b
20 changed files with 50 additions and 53 deletions
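
Note on the regex changes below: InfoExtractor._search_json (in yt_dlp/extractor/common.py) already allows optional whitespace between the caller's start pattern and the JSON object it extracts, so a trailing \s* in the start pattern is redundant and can be dropped without changing what matches. A minimal sketch of the idea, using a simplified stand-in search_json rather than yt-dlp's actual helper:

import re

def search_json(start_pattern, string):
    # Rough approximation of _search_json: the helper itself inserts \s*
    # before the JSON group, so callers need not end their pattern with it.
    m = re.search(rf'(?:{start_pattern})\s*(?P<json>{{.+}})', string)
    return m and m.group('json')

webpage = 'window.videoInfo   = {"title": "example"}'
# The old pattern (trailing \s*) and the cleaned-up one capture the same JSON.
assert search_json(r'window.videoInfo\s*=\s*', webpage) == '{"title": "example"}'
assert search_json(r'window.videoInfo\s*=', webpage) == '{"title": "example"}'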

View File

@@ -84,7 +84,7 @@ class AcFunVideoIE(AcFunVideoBaseIE):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        json_all = self._search_json(r'window.videoInfo\s*=\s*', webpage, 'videoInfo', video_id)
+        json_all = self._search_json(r'window.videoInfo\s*=', webpage, 'videoInfo', video_id)
         title = json_all.get('title')
         video_list = json_all.get('videoList') or []
@@ -164,7 +164,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
         video_id = f'{video_id}{format_field(ac_idx, template="__%s")}'
         webpage = self._download_webpage(url, video_id)
-        json_bangumi_data = self._search_json(r'window.bangumiData\s*=\s*', webpage, 'bangumiData', video_id)
+        json_bangumi_data = self._search_json(r'window.bangumiData\s*=', webpage, 'bangumiData', video_id)
         if ac_idx:
             video_info = json_bangumi_data['hlVideoInfo']
@@ -181,7 +181,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
             if v.get('id') == season_id), 1)
         json_bangumi_list = self._search_json(
-            r'window\.bangumiList\s*=\s*', webpage, 'bangumiList', video_id, fatal=False)
+            r'window\.bangumiList\s*=', webpage, 'bangumiList', video_id, fatal=False)
         video_internal_id = int_or_none(traverse_obj(json_bangumi_data, ('currentVideoInfo', 'id')))
         episode_number = video_internal_id and next((
             idx for idx, v in enumerate(json_bangumi_list.get('items') or [], 1)

View File

@@ -10,11 +10,11 @@ from ..aes import aes_encrypt
 from ..utils import (
     bytes_to_intlist,
     determine_ext,
-    intlist_to_bytes,
     int_or_none,
+    intlist_to_bytes,
     join_nonempty,
-    strip_jsonp,
     smuggle_url,
+    strip_jsonp,
     traverse_obj,
     unescapeHTML,
     unsmuggle_url,

View File

@@ -1,10 +1,5 @@
 from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    float_or_none,
-    unescapeHTML,
-    traverse_obj,
-)
+from ..utils import clean_html, float_or_none, traverse_obj, unescapeHTML
 class AudioBoomIE(InfoExtractor):

View File

@@ -5,16 +5,16 @@ import time
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
+    KNOWN_EXTENSIONS,
     ExtractorError,
     float_or_none,
     int_or_none,
-    KNOWN_EXTENSIONS,
     parse_filesize,
     str_or_none,
     try_get,
-    update_url_query,
     unified_strdate,
     unified_timestamp,
+    update_url_query,
     url_or_none,
     urljoin,
 )

View File

@@ -1,6 +1,7 @@
 import json
 import re
+from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     traverse_obj,
@@ -8,7 +9,6 @@ from ..utils import (
     unescapeHTML,
     unified_timestamp,
 )
-from .common import InfoExtractor
 class HRFernsehenIE(InfoExtractor):

View File

@@ -54,7 +54,7 @@ class HuyaLiveIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id=video_id)
-        stream_data = self._search_json(r'stream:\s+', webpage, 'stream', video_id=video_id, default=None)
+        stream_data = self._search_json(r'stream:\s', webpage, 'stream', video_id=video_id, default=None)
         room_info = try_get(stream_data, lambda x: x['data'][0]['gameLiveInfo'])
         if not room_info:
             raise ExtractorError('Can not extract the room info', expected=True)

View File

@@ -41,7 +41,7 @@ class IltalehtiIE(InfoExtractor):
         article_id = self._match_id(url)
         webpage = self._download_webpage(url, article_id)
         info = self._search_json(
-            r'<script>\s*window.App\s*=\s*', webpage, 'json', article_id,
+            r'<script>\s*window.App\s*=', webpage, 'json', article_id,
             transform_source=js_to_json)
         props = traverse_obj(info, (
             'state', 'articles', ..., 'items', (('main_media', 'properties'), ('body', ..., 'properties'))))

View File

@@ -452,7 +452,7 @@ class InstagramIE(InstagramBaseIE):
         webpage = self._download_webpage(
             f'{url}/embed/', video_id, note='Downloading embed webpage', fatal=False)
         additional_data = self._search_json(
-            r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*', webpage, 'additional data', video_id, fatal=False)
+            r'window\.__additionalDataLoaded\s*\(\s*[^,]+,', webpage, 'additional data', video_id, fatal=False)
         if not additional_data and not media:
             self.raise_login_required('Requested content is not available, rate-limit reached or login required')

View File

@@ -57,7 +57,7 @@ class Liputan6IE(InfoExtractor):
         webpage = self._download_webpage(url, display_id)
         json_data = self._search_json(
-            r'window.kmklabs.gtm\s*=\s*', webpage, 'json_data', display_id)
+            r'window.kmklabs.gtm\s*=', webpage, 'json_data', display_id)
         video_id = json_data['videos']['video_1']['video_id']
         return self.url_result(

View File

@@ -1,9 +1,5 @@
 from .common import InfoExtractor
-from ..utils import (
-    int_or_none,
-    traverse_obj,
-    unified_timestamp,
-)
+from ..utils import int_or_none, traverse_obj, unified_timestamp
 class MicrosoftEmbedIE(InfoExtractor):

View File

@@ -643,7 +643,7 @@ class NBCStationsIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         nbc_data = self._search_json(
-            r'<script>var\s*nbc\s*=\s*', webpage, 'NBC JSON data', video_id)
+            r'<script>var\s*nbc\s*=', webpage, 'NBC JSON data', video_id)
         pdk_acct = nbc_data.get('pdkAcct') or 'Yh1nAC'
         fw_ssid = traverse_obj(nbc_data, ('video', 'fwSSID'))
         fw_network_id = traverse_obj(nbc_data, ('video', 'fwNetworkID'), default='382114')

View File

@@ -2,10 +2,10 @@ import re
 from .common import InfoExtractor
 from ..utils import (
-    clean_html,
     ExtractorError,
-    js_to_json,
     base_url,
+    clean_html,
+    js_to_json,
     url_basename,
     urljoin,
 )

View File

@@ -7,9 +7,9 @@ from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     format_field,
-    traverse_obj,
     int_or_none,
     str_or_none,
+    traverse_obj,
     try_get,
 )

View File

@@ -62,7 +62,7 @@ class TVIPlayerIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         json_data = self._search_json(
-            r'<script>\s*jsonData\s*=\s*', webpage, 'json_data', video_id)
+            r'<script>\s*jsonData\s*=', webpage, 'json_data', video_id)
         formats, subtitles = self._extract_m3u8_formats_and_subtitles(
             f'{json_data["videoUrl"]}?wmsAuthSign={self.wms_auth_sign_token}',

View File

@@ -6,9 +6,9 @@ from ..utils import (
     determine_ext,
     extract_attributes,
     int_or_none,
+    lowercase_escape,
     try_get,
     url_or_none,
-    lowercase_escape,
 )