Mirror of https://github.com/yt-dlp/yt-dlp.git

[ie/telecinco] Fix extractor (#15311)

Closes #15240
Authored by: 0xvd, bashonly

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
--- a/yt_dlp/extractor/telecinco.py
+++ b/yt_dlp/extractor/telecinco.py
@@ -6,20 +6,21 @@ from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     clean_html,
+    extract_attributes,
     int_or_none,
     join_nonempty,
     str_or_none,
-    traverse_obj,
     update_url,
     url_or_none,
 )
+from ..utils.traversal import traverse_obj
 
 
 class TelecincoBaseIE(InfoExtractor):
     def _parse_content(self, content, url):
-        video_id = content['dataMediaId']
+        video_id = content['dataMediaId'][1]
         config = self._download_json(
-            content['dataConfig'], video_id, 'Downloading config JSON')
+            content['dataConfig'][1], video_id, 'Downloading config JSON')
         services = config['services']
         caronte = self._download_json(services['caronte'], video_id)
         if traverse_obj(caronte, ('dls', 0, 'drm', {bool})):
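
Note on the new [1] indexing: the revamped pages render their data through Astro islands (see the astro-island handling in the last hunk), which appear to serialize each prop as a [type, value] pair, so the actual value always sits at index 1. A minimal sketch with a made-up payload:

# Sketch only: the payload shape is inferred from the [1]-indexing in this
# diff, not taken from a real Telecinco page.
content = {
    'dataMediaId': [0, 'HYPOTHETICAL_ID'],  # assumed [type, value] pair
    'dataConfig': [0, 'https://example.invalid/config.json'],
}
video_id = content['dataMediaId'][1]  # index 1 -> the actual value
config_url = content['dataConfig'][1]
assert video_id == 'HYPOTHETICAL_ID'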
@@ -57,9 +58,9 @@ class TelecincoBaseIE(InfoExtractor):
             'id': video_id,
             'title': traverse_obj(config, ('info', 'title', {str})),
             'formats': formats,
-            'thumbnail': (traverse_obj(content, ('dataPoster', {url_or_none}))
+            'thumbnail': (traverse_obj(content, ('dataPoster', 1, {url_or_none}))
                           or traverse_obj(config, 'poster', 'imageUrl', expected_type=url_or_none)),
-            'duration': traverse_obj(content, ('dataDuration', {int_or_none})),
+            'duration': traverse_obj(content, ('dataDuration', 1, {int_or_none})),
             'http_headers': headers,
         }
 
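
The 1 hops added to the traverse_obj paths above perform the same unwrap declaratively; traverse_obj tolerates missing keys, so malformed props simply yield None instead of raising. A small sketch against a made-up content dict:

# Sketch: made-up content dict in the assumed [type, value] shape
from yt_dlp.utils import int_or_none, url_or_none
from yt_dlp.utils.traversal import traverse_obj

content = {'dataPoster': [0, 'https://example.invalid/poster.jpg'], 'dataDuration': [0, '123']}
print(traverse_obj(content, ('dataPoster', 1, {url_or_none})))    # -> the poster URL
print(traverse_obj(content, ('dataDuration', 1, {int_or_none})))  # -> 123
print(traverse_obj({}, ('dataPoster', 1, {url_or_none})))         # -> None, no KeyError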
@@ -137,30 +138,45 @@ class TelecincoIE(TelecincoBaseIE):
         'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html',
         'only_matching': True,
     }]
+    _ASTRO_ISLAND_RE = re.compile(r'<astro-island\b[^>]+>')
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id, impersonate=True)
-        article = self._search_json(
-            r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=',
-            webpage, 'article', display_id)['article']
-        description = traverse_obj(article, ('leadParagraph', {clean_html}, filter))
+        props_list = traverse_obj(webpage, (
+            {self._ASTRO_ISLAND_RE.findall}, ...,
+            {extract_attributes}, 'props', {json.loads}))
 
-        if article.get('editorialType') != 'VID':
+        description = traverse_obj(props_list, (..., 'leadParagraph', 1, {clean_html}, any, filter))
+        main_content = traverse_obj(props_list, (..., ('content', ('articleData', 1, 'opening')), 1, {dict}, any))
+
+        if traverse_obj(props_list, (..., 'editorialType', 1, {str}, any)) != 'VID':  # e.g. 'ART'
             entries = []
 
-            for p in traverse_obj(article, ((('opening', all), 'body'), lambda _, v: v['content'])):
-                content = p['content']
-                type_ = p.get('type')
-                if type_ == 'paragraph' and isinstance(content, str):
+            for p in traverse_obj(props_list, (..., 'articleData', 1, ('opening', ('body', 1, ...)), 1, {dict})):
+                type_ = traverse_obj(p, ('type', 1, {str}))
+                content = traverse_obj(p, ('content', 1, {str} if type_ == 'paragraph' else {dict}))
+                if not content:
+                    continue
+                if type_ == 'paragraph':
                     description = join_nonempty(description, content, delim='')
-                elif type_ == 'video' and isinstance(content, dict):
+                elif type_ == 'video':
                     entries.append(self._parse_content(content, url))
+                else:
+                    self.report_warning(
+                        f'Skipping unsupported content type "{type_}"', display_id, only_once=True)
 
             return self.playlist_result(
-                entries, str_or_none(article.get('id')),
-                traverse_obj(article, ('title', {str})), clean_html(description))
+                entries,
+                traverse_obj(props_list, (..., 'id', 1, {int}, {str_or_none}, any)) or display_id,
+                traverse_obj(main_content, ('dataTitle', 1, {str})),
+                clean_html(description))
 
-        info = self._parse_content(article['opening']['content'], url)
+        if not main_content:
+            raise ExtractorError('Unable to extract main content from webpage')
+
+        info = self._parse_content(main_content, url)
         info['description'] = description
         return info
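
For context, a self-contained sketch of the new pipeline against hypothetical markup (a real page carries many islands with far larger props): it collects every astro-island tag, parses its props attribute, then traverses the resulting list the same way _real_extract does above.

import json
import re

from yt_dlp.utils import extract_attributes
from yt_dlp.utils.traversal import traverse_obj

ASTRO_ISLAND_RE = re.compile(r'<astro-island\b[^>]+>')

# Hypothetical markup mimicking an Astro-rendered page
webpage = """<astro-island uid="a1" props='{"editorialType":[0,"VID"],"content":[1,{"dataMediaId":[0,"HYPOTHETICAL_ID"]}]}'></astro-island>"""

props_list = traverse_obj(webpage, (
    {ASTRO_ISLAND_RE.findall}, ...,                # every <astro-island ...> opening tag
    {extract_attributes}, 'props', {json.loads}))  # -> one parsed props dict per island

print(traverse_obj(props_list, (..., 'editorialType', 1, {str}, any)))       # -> VID
print(traverse_obj(props_list, (..., 'content', 1, 'dataMediaId', 1, any)))  # -> HYPOTHETICAL_ID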