Mirror of https://github.com/yt-dlp/yt-dlp.git — synced 2025-12-29 02:51:30 +00:00

[docs,cleanup] Some minor refactoring and improve docs

This commit is contained in:
pukkandan
2021-09-17 23:53:55 +05:30
parent d710cc6d36
commit e6f21b3d92
14 changed files with 55 additions and 39 deletions

View File

@@ -454,13 +454,12 @@ class YoutubeDL(object):
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'upload_year', 'upload_month', 'upload_day',
'timestamp', 'release_timestamp',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
params = None
@@ -579,8 +578,8 @@ class YoutubeDL(object):
self._setup_opener()
"""Preload the archive, if any is specified"""
def preload_download_archive(fn):
"""Preload the archive, if any is specified"""
if fn is None:
return False
self.write_debug('Loading archive file %r\n' % fn)
@@ -934,10 +933,11 @@ class YoutubeDL(object):
if info_dict.get('resolution') is None:
info_dict['resolution'] = self.format_resolution(info_dict, default=None)
# For fields playlist_index and autonumber convert all occurrences
# For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
'playlist_autonumber': len(str(info_dict.get('n_entries') or '')),
'autonumber': self.params.get('autonumber_size') or 5,
}

View File

@@ -513,6 +513,7 @@ def _real_main(argv=None):
'add_chapters': opts.addchapters,
'add_metadata': opts.addmetadata,
})
# Note: Deprecated
# This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
# but must be below EmbedSubtitle and FFmpegMetadata
# See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29

View File

@@ -50,6 +50,7 @@ class Cache(object):
except OSError as ose:
if ose.errno != errno.EEXIST:
raise
self._ydl.write_debug(f'Saving {section}.{key} to cache')
write_json_file(data, fn)
except Exception:
tb = traceback.format_exc()
@@ -66,6 +67,7 @@ class Cache(object):
try:
try:
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
self._ydl.write_debug(f'Loading {section}.{key} from cache')
return json.load(cachef)
except ValueError:
try:

View File

@@ -33,6 +33,8 @@ class compat_HTMLParseError(Exception):
pass
# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
    """Lazy wrapper around ctypes.WINFUNCTYPE.

    Per the note above, ctypes.WINFUNCTYPE does not exist on UNIX machines,
    so a direct module-level alias would raise at import time. Wrapping the
    call in a function defers the attribute lookup until the first actual
    use, keeping this compat module importable on all platforms.
    """
    return ctypes.WINFUNCTYPE(*args, **kwargs)

View File

@@ -406,6 +406,10 @@ class InfoExtractor(object):
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Subclasses may also override suitable() if necessary, but ensure the function
signature is preserved and that this function imports everything it needs
(except other extractors), so that lazy_extractors works correctly
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
@@ -421,7 +425,7 @@ class InfoExtractor(object):
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
The _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""

View File

@@ -621,7 +621,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
return delegated_sid
sync_ids = (try_get(
data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
if len(sync_ids) >= 2 and sync_ids[1]:
# datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
# and just "user_syncid||" for primary channel. We only want the channel_syncid

View File

@@ -261,7 +261,7 @@ def parseOpts(overrideArguments=None):
general.add_option(
'--mark-watched',
action='store_true', dest='mark_watched', default=False,
help='Mark videos watched (YouTube only)')
help='Mark videos watched (even with --simulate). Currently only supported for YouTube')
general.add_option(
'--no-mark-watched',
action='store_false', dest='mark_watched',
@@ -768,7 +768,7 @@ def parseOpts(overrideArguments=None):
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
'--no-check-certificates',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(

View File

@@ -478,7 +478,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('mp4', 'mkv', 'flv', 'webm', 'mov', 'avi', 'mp3', 'mka', 'm4a', 'ogg', 'opus')
FORMAT_RE = re.compile(r'{0}(?:/{0})*$'.format(r'(?:\w+>)?(?:%s)' % '|'.join(SUPPORTED_EXTS)))
_action = 'converting'
_ACTION = 'converting'
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
@@ -497,29 +497,28 @@ class FFmpegVideoConvertorPP(FFmpegPostProcessor):
return []
@PostProcessor._restrict_to(images=False)
def run(self, information):
path, source_ext = information['filepath'], information['ext'].lower()
def run(self, info):
filename, source_ext = info['filepath'], info['ext'].lower()
target_ext = self._target_ext(source_ext)
_skip_msg = (
'could not find a mapping for %s' if not target_ext
else 'already is in target format %s' if source_ext == target_ext
f'could not find a mapping for {source_ext}' if not target_ext
else f'already is in target format {source_ext}' if source_ext == target_ext
else None)
if _skip_msg:
self.to_screen('Not %s media file "%s"; %s' % (self._action, path, _skip_msg % source_ext))
return [], information
self.to_screen(f'Not {self._ACTION} media file {filename!r}; {_skip_msg}')
return [], info
prefix, sep, oldext = path.rpartition('.')
outpath = prefix + sep + target_ext
self.to_screen('%s video from %s to %s; Destination: %s' % (self._action.title(), source_ext, target_ext, outpath))
self.run_ffmpeg(path, outpath, self._options(target_ext))
outpath = replace_extension(filename, target_ext, source_ext)
self.to_screen(f'{self._ACTION.title()} video from {source_ext} to {target_ext}; Destination: {outpath}')
self.run_ffmpeg(filename, outpath, self._options(target_ext))
information['filepath'] = outpath
information['format'] = information['ext'] = target_ext
return [path], information
info['filepath'] = outpath
info['format'] = info['ext'] = target_ext
return [filename], info
class FFmpegVideoRemuxerPP(FFmpegVideoConvertorPP):
_action = 'remuxing'
_ACTION = 'remuxing'
@staticmethod
def _options(target_ext):

View File

@@ -4,7 +4,7 @@ from hashlib import sha256
from .ffmpeg import FFmpegPostProcessor
from ..compat import compat_urllib_parse_urlencode, compat_HTTPError
from ..utils import PostProcessingError, sanitized_Request
from ..utils import PostProcessingError, network_exceptions, sanitized_Request
class SponsorBlockPP(FFmpegPostProcessor):
@@ -88,9 +88,9 @@ class SponsorBlockPP(FFmpegPostProcessor):
self.write_debug(f'SponsorBlock query: {url}')
try:
rsp = self._downloader.urlopen(sanitized_Request(url))
except compat_HTTPError as e:
if e.code == 404:
except network_exceptions as e:
if isinstance(e, compat_HTTPError) and e.code == 404:
return []
raise PostProcessingError(f'Error communicating with SponsorBlock API - {e}')
raise PostProcessingError(f'Unable to communicate with SponsorBlock API - {e}')
return json.loads(rsp.read().decode(rsp.info().get_param('charset') or 'utf-8'))