[teamcoco] Fix extraction for full episodes (closes #16573)
@@ -9,6 +9,7 @@ from ..utils import (
     xpath_text,
     int_or_none,
     determine_ext,
+    float_or_none,
     parse_duration,
     xpath_attr,
     update_url_query,
@@ -23,14 +24,17 @@ class TurnerBaseIE(AdobePassIE):
     def _extract_timestamp(self, video_data):
         return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
 
-    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data):
+    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
         secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
         token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
         if not token:
             query = {
                 'path': secure_path,
-                'videoId': content_id,
             }
+            if custom_tokenizer_query:
+                query.update(custom_tokenizer_query)
+            else:
+                query['videoId'] = content_id
             if ap_data.get('auth_required'):
                 query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
             auth = self._download_xml(
@@ -188,3 +192,42 @@ class TurnerBaseIE(AdobePassIE):
             'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
             'is_live': is_live,
         }
+
+    def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
+        streams_data = self._download_json(
+            'http://medium.ngtv.io/media/%s/tv' % media_id,
+            media_id)['media']['tv']
+        duration = None
+        chapters = []
+        formats = []
+        for supported_type in ('unprotected', 'bulkaes'):
+            stream_data = streams_data.get(supported_type, {})
+            m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
+            if not m3u8_url:
+                continue
+            if stream_data.get('playlistProtection') == 'spe':
+                m3u8_url = self._add_akamai_spe_token(
+                    'http://token.ngtv.io/token/token_spe',
+                    m3u8_url, media_id, ap_data or {}, tokenizer_query)
+            formats.extend(self._extract_m3u8_formats(
+                m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
+
+            duration = float_or_none(stream_data.get('totalRuntime'))
+
+            if not chapters:
+                for chapter in stream_data.get('contentSegments', []):
+                    start_time = float_or_none(chapter.get('start'))
+                    chapter_duration = float_or_none(chapter.get('duration'))
+                    if start_time is None or chapter_duration is None:
+                        continue
+                    chapters.append({
+                        'start_time': start_time,
+                        'end_time': start_time + chapter_duration,
+                    })
+        self._sort_formats(formats)
+
+        return {
+            'formats': formats,
+            'chapters': chapters,
+            'duration': duration,
+        }
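The behavioral change in TurnerBaseIE is twofold: when a caller supplies custom_tokenizer_query, _add_akamai_spe_token() sends those parameters to the Akamai SPE tokenizer instead of the default videoId, and the new _extract_ngtv_info() helper forwards its tokenizer_query argument for exactly that purpose while collecting HLS formats, total runtime and chapter boundaries from the ngtv.io "tv" endpoint. A minimal sketch of how a Turner-based site extractor might call the helper follows; the extractor name, URL pattern, media-id lookup and tokenizer query keys are illustrative assumptions, not the actual TeamCoco code changed by this commit.

# Hedged sketch only: the class name, _VALID_URL, media-id regex and the
# tokenizer query keys below are assumptions made for illustration.
from .turner import TurnerBaseIE


class ExampleNgtvIE(TurnerBaseIE):
    _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>[^/?#]+)'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Assumed: the page exposes the ngtv.io media id in its markup.
        media_id = self._search_regex(
            r'data-media-id=["\']([^"\']+)', webpage, 'media id')

        # The dict passed here reaches _add_akamai_spe_token() as
        # custom_tokenizer_query and replaces the default {'videoId': ...}.
        info = self._extract_ngtv_info(
            media_id,
            {'accessTokenType': 'jws', 'mediaId': media_id},  # assumed keys
            {
                'url': url,
                'site_name': 'Example',
                'auth_required': False,
            })
        info.update({
            'id': media_id,
            'display_id': display_id,
            'title': self._og_search_title(webpage),
        })
        return info

Passing an empty tokenizer query falls back to the old videoId-based request, so existing Turner extractors keep working unchanged.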
Author: Remita Amine