1
0
mirror of https://github.com/yt-dlp/yt-dlp.git synced 2025-06-27 17:08:32 +00:00
This commit is contained in:
doe1080 2025-05-31 11:02:26 +09:00
parent a2a8c7e354
commit adf17002f2

View File

@ -332,33 +332,6 @@ def _extract_formats(self, api_data, video_id):
return formats
def _get_subtitles(self, video_id, api_data):
    """Download the video's comment ("danmaku") threads and expose them as a
    single JSON-format subtitle entry.

    Returns None when the API data carries no comment-server URL; otherwise a
    dict with a one-element 'comments' subtitle list whose data is the JSON
    dump of every comment from every returned thread.
    """
    # 'nvComment' holds the comment-server base URL plus the opaque
    # params/threadKey tokens needed to query it.
    comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict}), default={})
    if not comments_info.get('server'):
        return
    # Send the tokens back to the comment server; fatal=False so a comment
    # fetch failure does not abort the whole extraction (danmaku may then be
    # None). The trailing traversal flattens comments across all threads.
    danmaku = traverse_obj(self._download_json(
        f'{comments_info["server"]}/v1/threads', video_id,
        'Downloading comments', 'Failed to download comments', headers={
            'Content-Type': 'text/plain;charset=UTF-8',
            'Origin': self._BASE_URL,
            'Referer': f'{self._BASE_URL}/',
            'X-Client-Os-Type': 'others',
            **self._HEADERS,
        }, data=json.dumps({
            'additionals': {},
            'params': comments_info.get('params'),
            'threadKey': comments_info.get('threadKey'),
        }).encode(), fatal=False,
    ), ('data', 'threads', ..., 'comments', ...))
    return {
        'comments': [{
            'ext': 'json',
            'data': json.dumps(danmaku),
        }],
    }
def _real_extract(self, url):
video_id = self._match_id(url)
@ -370,15 +343,15 @@ def _real_extract(self, url):
**self.geo_verification_headers(),
}, query={
'actionTrackId': f'AAAAAAAAAA_{round(time_seconds() * 1000)}',
}, expected_status=[400, 404],
)
}, expected_status=[400, 404])
api_data = api_resp.get('data')
api_data = api_resp['data']
release_timestamp = traverse_obj(api_data, ('publishScheduledAt', {parse_iso8601}))
if (meta := api_resp.get('meta')).get('status') != 200:
err_code = meta.get('errorCode')
reason_code = traverse_obj(api_data, 'reasonCode', {str_or_none})
meta = api_resp['meta']
if meta.get('status') != 200:
err_code = meta['errorCode']
reason_code = traverse_obj(api_data, ('reasonCode', {str_or_none}))
err_msg = 'Server busy, service temporarily unavailable'
if reason_code in ('DOMESTIC_VIDEO', 'HIGH_RISK_COUNTRY_VIDEO'):
@ -405,8 +378,9 @@ def _real_extract(self, url):
})),
})
if not (formats := self._extract_formats(api_data, video_id)):
if (err_msg := self._STATUS_MAP.get(availability)):
formats = self._extract_formats(api_data, video_id)
if not formats:
if err_msg := self._STATUS_MAP.get(availability):
self.raise_login_required(err_msg, metadata_available=True)
thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp'])
@ -447,6 +421,33 @@ def _real_extract(self, url):
})),
}
def _get_subtitles(self, video_id, api_data):
    """Fetch the video's comment threads and wrap them as one JSON subtitle.

    Returns None when no comment server is advertised in the API data.
    """
    # The nvComment object carries the server endpoint and the opaque
    # params/threadKey tokens required by the threads API.
    nv_comment = traverse_obj(api_data, ('comment', 'nvComment', {dict}), default={})
    server = nv_comment.get('server')
    if not server:
        return
    request_headers = {
        'Content-Type': 'text/plain;charset=UTF-8',
        'Origin': self._BASE_URL,
        'Referer': f'{self._BASE_URL}/',
        'X-Client-Os-Type': 'others',
        **self._HEADERS,
    }
    payload = json.dumps({
        'additionals': {},
        'params': nv_comment.get('params'),
        'threadKey': nv_comment.get('threadKey'),
    }).encode()
    # fatal=False: a failed comment download must not abort extraction.
    response = self._download_json(
        f'{server}/v1/threads', video_id,
        'Downloading comments', 'Failed to download comments',
        headers=request_headers, data=payload, fatal=False)
    # Flatten every comment from every returned thread into a single list.
    comment_data = traverse_obj(response, ('data', 'threads', ..., 'comments', ...))
    return {
        'comments': [{
            'ext': 'json',
            'data': json.dumps(comment_data),
        }],
    }
class NiconicoPlaylistBaseIE(InfoExtractor):
_PAGE_SIZE = 100