Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-10-31 06:35:12 +00:00)
	[cleanup] Misc

.github/workflows/core.yml (vendored): 11 lines changed
@@ -12,13 +12,13 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        # CPython 3.9 is in quick-test
        python-version: ['3.7', '3.10', 3.11-dev, pypy-3.7, pypy-3.8]
        # CPython 3.11 is in quick-test
        python-version: ['3.8', '3.9', '3.10', pypy-3.7, pypy-3.8]
        run-tests-ext: [sh]
        include:
        # atleast one of each CPython/PyPy tests must be in windows
        - os: windows-latest
          python-version: '3.8'
          python-version: '3.7'
          run-tests-ext: bat
        - os: windows-latest
          python-version: pypy-3.9
@@ -33,5 +33,6 @@ jobs:
      run: pip install pytest
    - name: Run tests
      continue-on-error: False
      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} core
  # Linter is in quick-test
      run: |
        python3 -m yt_dlp -v || true  # Print debug head
        ./devscripts/run_tests.${{ matrix.run-tests-ext }} core

.github/workflows/quick-test.yml (vendored): 13 lines changed
@@ -10,24 +10,23 @@ jobs:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - name: Set up Python
    - name: Set up Python 3.11
      uses: actions/setup-python@v4
      with:
        python-version: 3.9
        python-version: '3.11'
    - name: Install test requirements
      run: pip install pytest pycryptodomex
    - name: Run tests
      run: ./devscripts/run_tests.sh core
      run: |
        python3 -m yt_dlp -v || true
        ./devscripts/run_tests.sh core
  flake8:
    name: Linter
    if: "!contains(github.event.head_commit.message, 'ci skip all')"
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: 3.9
    - uses: actions/setup-python@v4
    - name: Install flake8
      run: pip install flake8
    - name: Make lazy extractors

.gitignore (vendored): 1 line changed
@@ -71,6 +71,7 @@ dist/
zip/
tmp/
venv/
.venv/
completions/

# Misc

CONTRIBUTING.md:
@@ -351,8 +351,9 @@ Say you extracted a list of thumbnails into `thumbnail_data` and want to iterate
```python
thumbnail_data = data.get('thumbnails') or []
thumbnails = [{
    'url': item['url']
} for item in thumbnail_data]  # correct
    'url': item['url'],
    'height': item.get('h'),
} for item in thumbnail_data if item.get('url')]  # correct
```

and not like:
@@ -360,12 +361,27 @@ and not like:
```python
thumbnail_data = data.get('thumbnails')
thumbnails = [{
    'url': item['url']
    'url': item['url'],
    'height': item.get('h'),
} for item in thumbnail_data]  # incorrect
```

In this case, `thumbnail_data` will be `None` if the field was not found and this will cause the loop `for item in thumbnail_data` to raise a fatal error. Using `or []` avoids this error and results in setting an empty list in `thumbnails` instead.

Alternately, this can be further simplified by using `traverse_obj`

```python
thumbnails = [{
    'url': item['url'],
    'height': item.get('h'),
} for item in traverse_obj(data, ('thumbnails', lambda _, v: v['url']))]
```

or, even better,

```python
thumbnails = traverse_obj(data, ('thumbnails', ..., {'url': 'url', 'height': 'h'}))
```

### Provide fallbacks

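For context on the two `traverse_obj` forms recommended above, here is a rough, self-contained illustration with invented sample data; the printed expectations follow the documented behaviour of `yt_dlp.utils.traverse_obj` rather than anything asserted by this commit:

```python
# Invented sample data; assumes yt_dlp.utils.traverse_obj is importable
# (e.g. when running inside a yt-dlp checkout or with yt-dlp installed).
from yt_dlp.utils import traverse_obj

data = {'thumbnails': [
    {'url': 'https://example.com/a.jpg', 'h': 720},
    {'h': 1080},  # no 'url'; the filter form below should skip this entry
]}

# Filter form: only entries whose 'url' is present and truthy are traversed
thumbnails = [{
    'url': item['url'],
    'height': item.get('h'),
} for item in traverse_obj(data, ('thumbnails', lambda _, v: v['url']))]
print(thumbnails)  # expected: [{'url': 'https://example.com/a.jpg', 'height': 720}]

# Declarative form: traverse_obj builds the result dicts itself
print(traverse_obj(data, ('thumbnails', ..., {'url': 'url', 'height': 'h'})))
```

Either way, a missing `thumbnails` field produces an empty result rather than the `None` crash the surrounding text warns about, which is why the new CONTRIBUTING text offers `traverse_obj` as a replacement for the `or []` guard.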
							
								
								
									

README.md: 26 lines changed
@@ -432,19 +432,19 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
                                    explicitly provided IP block in CIDR notation

## Video Selection:
    -I, --playlist-items ITEM_SPEC  Comma separated playlist_index of the videos
    -I, --playlist-items ITEM_SPEC  Comma separated playlist_index of the items
                                    to download. You can specify a range using
                                    "[START]:[STOP][:STEP]". For backward
                                    compatibility, START-STOP is also supported.
                                    Use negative indices to count from the right
                                    and negative STEP to download in reverse
                                    order. E.g. "-I 1:3,7,-5::2" used on a
                                    playlist of size 15 will download the videos
                                    playlist of size 15 will download the items
                                    at index 1,2,3,7,11,13,15
    --min-filesize SIZE             Do not download any videos smaller than
    --min-filesize SIZE             Abort download if filesize is smaller than
                                    SIZE, e.g. 50k or 44.6M
    --max-filesize SIZE             Abort download if filesize is larger than
                                    SIZE, e.g. 50k or 44.6M
    --max-filesize SIZE             Do not download any videos larger than SIZE,
                                    e.g. 50k or 44.6M
    --date DATE                     Download only videos uploaded on this date.
                                    The date can be "YYYYMMDD" or in the format
                                    [now|today|yesterday][-N[day|week|month|year]].
@@ -491,9 +491,9 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
                                    a file that is in the archive
    --break-on-reject               Stop the download process when encountering
                                    a file that has been filtered out
    --break-per-input               --break-on-existing, --break-on-reject,
                                    --max-downloads, and autonumber resets per
                                    input URL
    --break-per-input               Alters --max-downloads, --break-on-existing,
                                    --break-on-reject, and autonumber to reset
                                    per input URL
    --no-break-per-input            --break-on-existing and similar options
                                    terminates the entire download queue
    --skip-playlist-after-errors N  Number of allowed failures until the rest of
@@ -1046,10 +1046,10 @@ Make chapter entries for, or remove various segments (sponsor,
                                    for, separated by commas. Available
                                    categories are sponsor, intro, outro,
                                    selfpromo, preview, filler, interaction,
                                    music_offtopic, poi_highlight, chapter, all and
                                    default (=all). You can prefix the category
                                    with a "-" to exclude it. See [1] for
                                    description of the categories. E.g.
                                    music_offtopic, poi_highlight, chapter, all
                                    and default (=all). You can prefix the
                                    category with a "-" to exclude it. See [1]
                                    for description of the categories. E.g.
                                    --sponsorblock-mark all,-preview
                                    [1] https://wiki.sponsor.ajay.app/w/Segment_Categories
    --sponsorblock-remove CATS      SponsorBlock categories to be removed from
@@ -1058,7 +1058,7 @@ Make chapter entries for, or remove various segments (sponsor,
                                    remove takes precedence. The syntax and
                                    available categories are the same as for
                                    --sponsorblock-mark except that "default"
                                    refers to "all,-filler" and poi_highlight and
                                    refers to "all,-filler" and poi_highlight,
                                    chapter are not available
    --sponsorblock-chapter-title TEMPLATE
                                    An output template for the title of the
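The ITEM_SPEC syntax described above is compact, so here is a rough stand-alone sketch of how a spec like `1:3,7,-5::2` maps to playlist indices. It is not yt-dlp's real parser (that lives elsewhere) and it ignores the ordering aspect of a negative STEP; it only shows which indices the documented example selects:

```python
def resolve_item_spec(spec, playlist_size):
    """Illustrative ITEM_SPEC resolver; simplified, not yt-dlp's implementation."""
    def absolute(idx):
        # Negative indices count from the right (-1 is the last item)
        return idx + playlist_size + 1 if idx < 0 else idx

    selected = set()
    for part in spec.split(','):
        if ':' not in part:
            selected.add(absolute(int(part)))
            continue
        pieces = part.split(':')
        start = absolute(int(pieces[0])) if pieces[0] else 1
        stop = absolute(int(pieces[1])) if pieces[1] else playlist_size
        step = int(pieces[2]) if len(pieces) > 2 and pieces[2] else 1
        # STOP is inclusive in the documented examples
        selected.update(range(start, stop + (1 if step > 0 else -1), step))
    return sorted(selected)

print(resolve_item_spec('1:3,7,-5::2', 15))  # [1, 2, 3, 7, 11, 13, 15]
```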

yt_dlp/YoutubeDL.py:
@@ -3123,7 +3123,7 @@ class YoutubeDL:
                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd is not FFmpegFD and (
                    if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
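The added `'no-direct-merge'` check gates the existing partial-download message behind a compat option. To read the combined condition in isolation, here is a small sketch with plain dicts standing in for yt-dlp's objects; the helper name is mine, not yt-dlp's:

```python
# Sketch only: mirrors the condition above, with the FFmpegFD check reduced to a flag.
def blocks_partial_download(fd_is_ffmpeg, params, info_dict):
    """True when the check above would emit its 'cannot be partially downloaded' message."""
    return (not fd_is_ffmpeg
            and 'no-direct-merge' not in params.get('compat_opts', ())
            and bool(info_dict.get('section_start') or info_dict.get('section_end')))
```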

yt_dlp/__init__.py:
@@ -91,12 +91,11 @@ def get_urls(urls, batchfile, verbose):


def print_extractor_information(opts, urls):
    # Importing GenericIE is currently slow since it imports other extractors
    # TODO: Move this back to module level after generalization of embed detection
    from .extractor.generic import GenericIE

    out = ''
    if opts.list_extractors:
        # Importing GenericIE is currently slow since it imports YoutubeIE
        from .extractor.generic import GenericIE

        urls = dict.fromkeys(urls, False)
        for ie in list_extractor_classes(opts.age_limit):
            out += ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n'
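The hunk above pushes an expensive import down into the only branch that uses it. A generic sketch of that pattern, with a stand-in module and option name (nothing here is yt-dlp's actual code):

```python
from types import SimpleNamespace

def print_report(opts):
    out = ''
    if opts.list_everything:
        # Deferred import: the common code path never pays the cost of
        # loading this module (a stand-in for the heavy GenericIE import).
        import json as heavy_module
        out += heavy_module.dumps({'listing': True})
    return out

print(print_report(SimpleNamespace(list_everything=False)))  # heavy module never imported
```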

yt_dlp/downloader/common.py:
@@ -20,6 +20,7 @@ from ..utils import (
    RetryManager,
    classproperty,
    decodeArgument,
    deprecation_warning,
    encodeFilename,
    format_bytes,
    join_nonempty,
@@ -180,7 +181,9 @@ class FileDownloader:
    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        parse_bytes(bytestr)
        deprecation_warning('yt_dlp.FileDownloader.parse_bytes is deprecated and '
                            'may be removed in the future. Use yt_dlp.utils.parse_bytes instead')
        return parse_bytes(bytestr)

    def slow_down(self, start_time, now, byte_counter):
        """Sleep if the download speed is over the rate limit."""
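The second hunk is a standard deprecation shim: keep the old entry point, warn, and forward to the module-level helper (it also fixes the old wrapper, which dropped the return value). A self-contained sketch using the standard library's `warnings` in place of yt-dlp's `deprecation_warning`, with a simplified byte parser as a stand-in:

```python
import warnings

def parse_bytes(bytestr):
    """Simplified stand-in for the real byte-size parser."""
    units = {'k': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
    if bytestr and bytestr[-1] in units:
        return int(float(bytestr[:-1]) * units[bytestr[-1]])
    return int(bytestr)

class FileDownloaderSketch:
    @staticmethod
    def parse_bytes(bytestr):
        """Deprecated: kept only so old callers keep working."""
        warnings.warn('FileDownloaderSketch.parse_bytes is deprecated; '
                      'call parse_bytes() directly instead',
                      DeprecationWarning, stacklevel=2)
        return parse_bytes(bytestr)  # resolves to the module-level function

print(FileDownloaderSketch.parse_bytes('50k'))  # 51200, plus a DeprecationWarning
```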

yt_dlp/extractor/common.py:
@@ -71,6 +71,7 @@ from ..utils import (
    str_to_int,
    strip_or_none,
    traverse_obj,
    truncate_string,
    try_call,
    try_get,
    unescapeHTML,
@@ -674,7 +675,8 @@ class InfoExtractor:
            for _ in range(2):
                try:
                    self.initialize()
                    self.write_debug('Extracting URL: %s' % url)
                    self.to_screen('Extracting URL: %s' % (
                        url if self.get_param('verbose') else truncate_string(url, 100, 20)))
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
@@ -1906,6 +1908,14 @@ class InfoExtractor:
            errnote=None, fatal=True, live=False, data=None, headers={},
            query={}):

        if not m3u8_url:
            if errnote is not False:
                errnote = errnote or 'Failed to obtain m3u8 URL'
                if fatal:
                    raise ExtractorError(errnote, video_id=video_id)
                self.report_warning(f'{errnote}{bug_reports_message()}')
            return [], {}

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note='Downloading m3u8 information' if note is None else note,
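The added block is a guard clause: with no manifest URL, the method now errors or warns immediately and returns the usual empty `(formats, subtitles)` pair instead of trying to download `None`. A stripped-down sketch of the same control flow, with plain stand-ins for yt-dlp's error helpers:

```python
class ExtractorError(Exception):  # stand-in for yt_dlp.utils.ExtractorError
    pass

def extract_m3u8_formats_sketch(m3u8_url, video_id, *, errnote=None, fatal=True):
    if not m3u8_url:
        if errnote is not False:  # errnote=False means "stay silent"
            errnote = errnote or 'Failed to obtain m3u8 URL'
            if fatal:
                raise ExtractorError(f'{video_id}: {errnote}')
            print(f'WARNING: {errnote}')
        return [], {}  # same (formats, subtitles) shape as the real method
    # ... the real method would download and parse the playlist here ...
    return [{'url': m3u8_url}], {}

print(extract_m3u8_formats_sketch(None, 'abc123', fatal=False))  # -> ([], {})
```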

yt_dlp/options.py:
@@ -535,10 +535,10 @@ def create_parser():
        '-I', '--playlist-items',
        dest='playlist_items', metavar='ITEM_SPEC', default=None,
        help=(
            'Comma separated playlist_index of the videos to download. '
            'Comma separated playlist_index of the items to download. '
            'You can specify a range using "[START]:[STOP][:STEP]". For backward compatibility, START-STOP is also supported. '
            'Use negative indices to count from the right and negative STEP to download in reverse order. '
            'E.g. "-I 1:3,7,-5::2" used on a playlist of size 15 will download the videos at index 1,2,3,7,11,13,15'))
            'E.g. "-I 1:3,7,-5::2" used on a playlist of size 15 will download the items at index 1,2,3,7,11,13,15'))
    selection.add_option(
        '--match-title',
        dest='matchtitle', metavar='REGEX',
@@ -554,7 +554,7 @@ def create_parser():
    selection.add_option(
        '--max-filesize',
        metavar='SIZE', dest='max_filesize', default=None,
        help='Abort download if filesize if larger than SIZE, e.g. 50k or 44.6M')
        help='Abort download if filesize is larger than SIZE, e.g. 50k or 44.6M')
    selection.add_option(
        '--date',
        metavar='DATE', dest='date', default=None,
@@ -635,7 +635,7 @@ def create_parser():
    selection.add_option(
        '--break-per-input',
        action='store_true', dest='break_per_url', default=False,
        help='--break-on-existing, --break-on-reject, --max-downloads, and autonumber resets per input URL')
        help='Alters --max-downloads, --break-on-existing, --break-on-reject, and autonumber to reset per input URL')
    selection.add_option(
        '--no-break-per-input',
        action='store_false', dest='break_per_url',

yt_dlp/utils.py:
@@ -3872,6 +3872,9 @@ class download_range_func:
        return (isinstance(other, download_range_func)
                and self.chapters == other.chapters and self.ranges == other.ranges)

    def __repr__(self):
        return f'{type(self).__name__}({self.chapters}, {self.ranges})'


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
@@ -5976,7 +5979,7 @@ def truncate_string(s, left, right=0):
    assert left > 3 and right >= 0
    if s is None or len(s) <= left + right:
        return s
    return f'{s[:left-3]}...{s[-right:]}'
    return f'{s[:left-3]}...{s[-right:] if right else ""}'


def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
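The `truncate_string` change fixes a `right=0` edge case: `s[-0:]` is the whole string, so the old f-string re-appended everything after the ellipsis instead of truncating. A quick stand-alone reproduction of the two return expressions (the sample string is arbitrary):

```python
s = 'abcdefghijklmnopqrstuvwxyz'
left, right = 10, 0

old = f'{s[:left-3]}...{s[-right:]}'                   # s[-0:] is the whole string again
new = f'{s[:left-3]}...{s[-right:] if right else ""}'  # only the ellipsis is appended

print(old)  # abcdefg...abcdefghijklmnopqrstuvwxyz
print(new)  # abcdefg...
```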

Author: pukkandan