Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-10-30 22:25:19 +00:00)

.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 changed lines)
@@ -2,8 +2,6 @@
 
 ### Description of your *pull request* and other information
 
-</details>
-
 <!--
 
 Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
@@ -41,3 +39,5 @@ Fixes #
 - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
 - [ ] Core bug fix/improvement
 - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
+
+</details>

.gitignore (vendored, 5 changed lines)
@@ -30,6 +30,7 @@ cookies
 *.f4v
 *.flac
 *.flv
+*.gif
 *.jpeg
 *.jpg
 *.m4a
@@ -120,5 +121,5 @@ yt-dlp.zip
 */extractor/lazy_extractors.py
 
 # Plugins
-ytdlp_plugins/*
-yt-dlp-plugins/*
+ytdlp_plugins/
+yt-dlp-plugins
@@ -42,7 +42,7 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
 * Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc
 
 
-## [Lesmiscore](https://github.com/Lesmiscore) (nao20010128nao)
+## [Lesmiscore](https://github.com/Lesmiscore) <sup><sub>(nao20010128nao)</sup></sub>
 
 **Bitcoin**: bc1qfd02r007cutfdjwjmyy9w23rjvtls6ncve7r3s
 **Monacoin**: mona1q3tf7dzvshrhfe3md379xtvt2n22duhglv5dskr

Makefile (4 changed lines)
@@ -17,8 +17,8 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
 clean-test:
 	rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
 	*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
-	*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 *.mp4 \
-	*.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
+	*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
+	*.mp4 *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
 clean-dist:
 	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
 	yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
@@ -10,7 +10,7 @@
 [](https://discord.gg/H5MNcFW63r "Discord")
 [](supportedsites.md "Supported Sites")
 [](LICENSE "License")
-[](https://github.com/yt-dlp/yt-dlp/actions "CI Status")
+[](https://github.com/yt-dlp/yt-dlp/actions "CI Status")
 [](https://github.com/yt-dlp/yt-dlp/commits "Commit History")
 [](https://github.com/yt-dlp/yt-dlp/commits "Commit History")
 
@@ -3392,6 +3392,7 @@ class YoutubeDL:
             reject = lambda k, v: v is None or k.startswith('__') or k in {
                 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
+                '_format_sort_fields',
             }
         else:
             reject = lambda k, v: False
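The added `'_format_sort_fields'` entry extends the set of internal keys dropped when the info dict is sanitized for output. A minimal standalone sketch of that filtering pattern, using a made-up info dict rather than real yt-dlp data:

```python
# Internal bookkeeping keys that should not leak into the written info JSON
# (same idea as the reject lambda above, shown on hypothetical data).
internal_keys = {
    'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
    'entries', 'filepath', '_filename', 'infojson_filename', 'original_url',
    'playlist_autonumber', '_format_sort_fields',
}

reject = lambda k, v: v is None or k.startswith('__') or k in internal_keys

info = {
    'id': 'abc123',
    'title': 'Example video',
    'duration': None,                           # dropped: value is None
    '__post_hook_state': 'x',                   # dropped: double-underscore key
    '_format_sort_fields': ('quality', 'res'),  # dropped: internal key
}
print({k: v for k, v in info.items() if not reject(k, v)})
# {'id': 'abc123', 'title': 'Example video'}
```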
@@ -332,7 +332,7 @@ def validate_options(opts):
                     mobj = range_ != '-' and re.fullmatch(r'([^-]+)?\s*-\s*([^-]+)?', range_)
                     dur = mobj and (parse_timestamp(mobj.group(1) or '0'), parse_timestamp(mobj.group(2) or 'inf'))
                     if None in (dur or [None]):
-                        raise ValueError(f'invalid {name} time range "{regex}". Must be of the form *start-end')
+                        raise ValueError(f'invalid {name} time range "{regex}". Must be of the form "*start-end"')
                     ranges.append(dur)
                 continue
             try:
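For context, this code validates `*start-end` section ranges. A self-contained sketch of the same parsing shape; `parse_ts` below is a simplified stand-in for yt-dlp's `parse_timestamp`, not the real helper:

```python
import re

def parse_ts(value):
    # Simplified stand-in: accepts 'inf', plain seconds, or colon-separated times.
    if value == 'inf':
        return float('inf')
    try:
        parts = [float(p) for p in value.split(':')]
    except ValueError:
        return None  # unparsable timestamp
    return sum(p * 60 ** i for i, p in enumerate(reversed(parts)))

def parse_range(range_):
    mobj = range_ != '-' and re.fullmatch(r'([^-]+)?\s*-\s*([^-]+)?', range_)
    dur = mobj and (parse_ts(mobj.group(1) or '0'), parse_ts(mobj.group(2) or 'inf'))
    if None in (dur or [None]):
        raise ValueError(f'invalid time range "{range_}". Must be of the form "*start-end"')
    return dur

print(parse_range('1:30-2:00'))  # (90.0, 120.0)
print(parse_range('-45'))        # (0.0, 45.0) -- open-ended start
```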
@@ -1013,7 +1013,7 @@ class BiliIntlIE(BiliIntlBaseIE):
 
 
 class BiliIntlSeriesIE(BiliIntlBaseIE):
-    IE_NAME = 'biliintl:series'
+    IE_NAME = 'biliIntl:series'
     _VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?play/(?P<id>\d+)/?(?:[?#]|$)'
     _TESTS = [{
         'url': 'https://www.bilibili.tv/en/play/34613',
@@ -1262,7 +1262,9 @@ class InfoExtractor:
         Like _search_regex, but strips HTML tags and unescapes entities.
         """
         res = self._search_regex(pattern, string, name, default, fatal, flags, group)
-        if res:
+        if isinstance(res, tuple):
+            return [clean_html(r).strip() for r in res]
+        elif res:
             return clean_html(res).strip()
         else:
             return res
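The new branch lets a tuple of capture groups come back as a list of cleaned strings instead of breaking on `clean_html(tuple)`. A rough standalone illustration; `strip_tags` is a crude stand-in for yt-dlp's `clean_html`:

```python
import re

def strip_tags(html):
    # Crude stand-in for clean_html: drop tags, keep text.
    return re.sub(r'<[^>]+>', '', html)

def html_search(pattern, text, group=None):
    m = re.search(pattern, text)
    if not m:
        return None
    res = m.group(*group) if isinstance(group, (list, tuple)) else m.group(group or 0)
    if isinstance(res, tuple):                      # multiple groups requested
        return [strip_tags(r).strip() for r in res]
    return strip_tags(res).strip() if res else res

html = '<span class="title">Hello</span> by <b>Alice</b>'
print(html_search(r'class="title">(.+?)</span> by (.+)$', html, group=(1, 2)))
# ['Hello', 'Alice']
```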
@@ -3512,7 +3514,7 @@ class InfoExtractor:
         elif cls.IE_DESC:
             desc += f' {cls.IE_DESC}'
         if cls.SEARCH_KEY:
-            desc += f'; "{cls.SEARCH_KEY}:" prefix'
+            desc += f'{";" if cls.IE_DESC else ""} "{cls.SEARCH_KEY}:" prefix'
             if search_examples:
                 _COUNTS = ('', '5', '10', 'all')
                 desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
@@ -182,7 +182,7 @@ class CrunchyrollBetaIE(CrunchyrollBaseIE):
             self.to_screen(
                 'To get all formats of a hardsub language, use '
                 '"--extractor-args crunchyrollbeta:hardsub=<language_code or all>". '
-                'See https://github.com/yt-dlp/yt-dlp#crunchyrollbeta for more info',
+                'See https://github.com/yt-dlp/yt-dlp#crunchyrollbeta-crunchyroll for more info',
                 only_once=True)
         else:
             full_format_langs = set(map(str.lower, available_formats))
@@ -270,9 +270,9 @@ class ZenYandexIE(InfoExtractor):
         for s_url in stream_urls:
             ext = determine_ext(s_url)
             if ext == 'mpd':
-                formats.extend(self._extract_mpd_formats(s_url, id, mpd_id='dash'))
+                formats.extend(self._extract_mpd_formats(s_url, video_id, mpd_id='dash'))
             elif ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(s_url, id, 'mp4'))
+                formats.extend(self._extract_m3u8_formats(s_url, video_id, 'mp4'))
         return {
             'id': video_id,
             'title': video_json.get('title') or self._og_search_title(webpage),
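The rename matters because, with no local `id` defined, the bare name resolves to Python's builtin function, so the old calls passed a function object where a video-id string was expected:

```python
# With no local variable named "id", the name is the builtin function.
print(id)              # <built-in function id>

video_id = 'zen-demo'  # hypothetical id extracted earlier in the method
print(video_id)        # zen-demo -- what the format helpers actually need
```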
@@ -292,7 +292,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
 
     _RESERVED_NAMES = (
-        r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
+        r'channel|c|user|playlist|watch|w|v|embed|e|live|watch_popup|clip|'
         r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
         r'browse|oembed|get_video_info|iframe_api|s/player|source|'
         r'storefront|oops|index|account|t/terms|about|upload|signin|logout')
@@ -3683,7 +3683,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'url': fmt_url,
                 'width': int_or_none(fmt.get('width')),
                 'language': join_nonempty(audio_track.get('id', '').split('.')[0],
-                                          'desc' if language_preference < -1 else ''),
+                                          'desc' if language_preference < -1 else '') or None,
                 'language_preference': language_preference,
                 # Strictly de-prioritize damaged and 3gp formats
                 'preference': -10 if is_damaged else -2 if itag == '17' else None,
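The trailing `or None` keeps an all-empty join from being stored as a zero-length language tag. A sketch with a minimal stand-in for yt-dlp's `join_nonempty` (assumed behaviour: join the truthy values with '-'):

```python
def join_nonempty(*values, delim='-'):
    # Minimal stand-in: keep only truthy values, join with the delimiter.
    return delim.join(str(v) for v in values if v)

audio_track_id = ''        # hypothetical format with no audio-track metadata
language_preference = -1   # not a "descriptive" audio track

lang = join_nonempty(audio_track_id.split('.')[0],
                     'desc' if language_preference < -1 else '')
print(repr(lang))          # '' -- an empty string would be kept as the language
print(repr(lang or None))  # None -- the field is left unset instead
```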
@@ -109,7 +109,7 @@ def parseOpts(overrideArguments=None, ignore_config_files='if_override'):
     opts = optparse.Values({'verbose': True, 'print_help': False})
     try:
         try:
-            if overrideArguments:
+            if overrideArguments is not None:
                 root.append_config(overrideArguments, label='Override')
             else:
                 root.append_config(sys.argv[1:], label='Command-line')
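`is not None` distinguishes an explicitly supplied (possibly empty) argument list from no override at all; a plain truthiness check treats `[]` like `None`. A small sketch of the difference, not the real `parseOpts` logic:

```python
import sys

def pick_args(override_arguments=None):
    # [] means "the caller supplied arguments (none of them)" and must not
    # fall back to the command line; only None means "nothing was passed".
    if override_arguments is not None:
        return override_arguments, 'Override'
    return sys.argv[1:], 'Command-line'

print(pick_args([]))    # ([], 'Override') -- a truthiness check would return sys.argv here
print(pick_args(None))  # (<command-line args>, 'Command-line')
```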
@@ -904,11 +904,11 @@ def create_parser():
             'This option can be used multiple times to set the sleep for the different retry types, '
             'e.g. --retry-sleep linear=1::2 --retry-sleep fragment:exp=1:20'))
     downloader.add_option(
-        '--skip-unavailable-fragments', '--no-abort-on-unavailable-fragment',
+        '--skip-unavailable-fragments', '--no-abort-on-unavailable-fragments',
         action='store_true', dest='skip_unavailable_fragments', default=True,
-        help='Skip unavailable fragments for DASH, hlsnative and ISM downloads (default) (Alias: --no-abort-on-unavailable-fragment)')
+        help='Skip unavailable fragments for DASH, hlsnative and ISM downloads (default) (Alias: --no-abort-on-unavailable-fragments)')
     downloader.add_option(
-        '--abort-on-unavailable-fragment', '--no-skip-unavailable-fragments',
+        '--abort-on-unavailable-fragments', '--no-skip-unavailable-fragments',
         action='store_false', dest='skip_unavailable_fragments',
         help='Abort download if a fragment is unavailable (Alias: --no-skip-unavailable-fragments)')
     downloader.add_option(
@@ -407,7 +407,7 @@ class FFmpegPostProcessor(PostProcessor):
         """
         concat_file = f'{out_file}.concat'
         self.write_debug(f'Writing concat spec to {concat_file}')
-        with open(concat_file, 'wt', encoding='utf-8') as f:
+        with open(concat_file, 'w', encoding='utf-8') as f:
             f.writelines(self._concat_spec(in_files, concat_opts))
 
         out_flags = list(self.stream_copy_opts(ext=determine_ext(out_file)))
@@ -711,7 +711,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
 
     @staticmethod
     def _get_chapter_opts(chapters, metadata_filename):
-        with open(metadata_filename, 'wt', encoding='utf-8') as f:
+        with open(metadata_filename, 'w', encoding='utf-8') as f:
             def ffmpeg_escape(text):
                 return re.sub(r'([\\=;#\n])', r'\\\1', text)
 
@@ -981,7 +981,7 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
                 with open(dfxp_file, 'rb') as f:
                     srt_data = dfxp2srt(f.read())
 
-                with open(srt_file, 'wt', encoding='utf-8') as f:
+                with open(srt_file, 'w', encoding='utf-8') as f:
                     f.write(srt_data)
                 old_file = srt_file
 
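These mode changes are purely cosmetic: text mode is the default, so `'wt'` and `'w'` open the file identically. A quick demonstration with a throwaway temp file:

```python
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'demo.txt')
with open(path, 'wt', encoding='utf-8') as f:  # explicit text mode
    f.write('first\n')
with open(path, 'w', encoding='utf-8') as f:   # same behaviour: text mode is the default
    f.write('second\n')
with open(path, encoding='utf-8') as f:
    print(f.read())                            # second
```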
@@ -3368,7 +3368,7 @@ def js_to_json(code, vars={}, *, strict=False):
             try:
                 if not strict:
                     json.loads(vars[v])
-            except json.decoder.JSONDecodeError:
+            except json.JSONDecodeError:
                 return json.dumps(vars[v])
             else:
                 return vars[v]
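`json.JSONDecodeError` is the same class re-exported from `json.decoder`, so the shorter spelling catches exactly the same exception:

```python
import json

print(json.JSONDecodeError is json.decoder.JSONDecodeError)  # True

try:
    json.loads('{not valid json}')
except json.JSONDecodeError as err:
    print('caught:', err.msg)  # "Expecting property name enclosed in double quotes"
```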
Author: pukkandan