@@ -272,7 +272,7 @@ class YoutubeDL:
 subtitleslangs: List of languages of the subtitles to download (can be regex).
 The list may contain "all" to refer to all the available
 subtitles. The language can be prefixed with a "-" to
-exclude it from the requested languages. Eg: ['all', '-live_chat']
+exclude it from the requested languages, e.g. ['all', '-live_chat']
 keepvideo: Keep the video file after post-processing
 daterange: A DateRange object, download only if the upload_date is in the range.
 skip_download: Skip the actual download of the video file
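
For reference, a minimal sketch of passing the subtitleslangs value above through the Python API; 'writesubtitles' is assumed from elsewhere in the same docstring and is not part of this hunk:

    import yt_dlp

    # Request every available subtitle language except YouTube live chat,
    # without downloading the media itself (see skip_download above).
    ydl_opts = {
        'writesubtitles': True,  # assumed param, documented elsewhere in this docstring
        'subtitleslangs': ['all', '-live_chat'],
        'skip_download': True,
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
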
@@ -302,7 +302,7 @@ class YoutubeDL:
 cookiefile: File name or text stream from where cookies should be read and dumped to
 cookiesfrombrowser: A tuple containing the name of the browser, the profile
 name/path from where cookies are loaded, and the name of the
-keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
+keyring, e.g. ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
 support RFC 5746 secure renegotiation
 nocheckcertificate: Do not verify SSL certificates
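
A short sketch of the two tuple forms described above, assuming the embedding API accepts the same params as the docstring:

    import yt_dlp

    # Cookies from the default Chrome profile; the longer tuple form also
    # selects a profile and a keyring backend, as documented above.
    for opts in ({'cookiesfrombrowser': ('chrome', )},
                 {'cookiesfrombrowser': ('vivaldi', 'default', 'BASICTEXT')}):
        with yt_dlp.YoutubeDL(opts) as ydl:
            info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
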
@@ -470,7 +470,7 @@ class YoutubeDL:
 discontinuities such as ad breaks (default: False)
 extractor_args: A dictionary of arguments to be passed to the extractors.
 See "EXTRACTOR ARGUMENTS" for details.
-Eg: {'youtube': {'skip': ['dash', 'hls']}}
+E.g. {'youtube': {'skip': ['dash', 'hls']}}
 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
 
 The following options are deprecated and may be removed in the future:
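
A minimal sketch mirroring the extractor_args example above (same nested-dict shape, passed to YoutubeDL):

    import yt_dlp

    # Skip the DASH and HLS manifests for the youtube extractor.
    opts = {'extractor_args': {'youtube': {'skip': ['dash', 'hls']}}}
    with yt_dlp.YoutubeDL(opts) as ydl:
        info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
        print(info['title'])
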
@@ -1046,7 +1046,7 @@ class YoutubeDL:
 
 # outtmpl should be expand_path'ed before template dict substitution
 # because meta fields may contain env variables we don't want to
-# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
+# be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
 # title "Hello $PATH", we don't want `$PATH` to be expanded.
 return expand_path(outtmpl).replace(sep, '')
 
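
A small illustration of the ordering concern in the comment above, using the stdlib calls that yt-dlp's expand_path wraps (expanduser + expandvars); the '~/videos/' prefix is only there to make the expansion visible:

    import os

    outtmpl, title = '~/videos/%(title)s.%(ext)s', 'Hello $PATH'

    # expand_path first, substitute later: $PATH inside the title survives.
    safe = os.path.expandvars(os.path.expanduser(outtmpl)) % {'title': title, 'ext': 'mp4'}

    # substitute first, expand later: the user's real $PATH leaks into the file name.
    unsafe = os.path.expandvars(os.path.expanduser(outtmpl % {'title': title, 'ext': 'mp4'}))

    print(safe)    # .../videos/Hello $PATH.mp4
    print(unsafe)  # .../videos/Hello /usr/bin:....mp4
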
@@ -1977,8 +1977,8 @@ class YoutubeDL:
 filter_parts.append(string)
 
 def _remove_unused_ops(tokens):
-# Remove operators that we don't use and join them with the surrounding strings
-# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
+# Remove operators that we don't use and join them with the surrounding strings.
+# E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
 ALLOWED_OPS = ('/', '+', ',', '(', ')')
 last_string, last_start, last_end, last_line = None, None, None, None
 for type, string, start, end, line in tokens:
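
A simplified, self-contained sketch of the joining behaviour described in the comment above (the real helper works on tokenize tokens and also tracks positions; this only shows the string-gluing idea):

    ALLOWED_OPS = ('/', '+', ',', '(', ')')

    def join_unused_ops(tokens):
        out = []
        for tok in tokens:
            if out and tok not in ALLOWED_OPS and out[-1] not in ALLOWED_OPS:
                out[-1] += tok  # glue unrecognised operators onto the surrounding strings
            else:
                out.append(tok)
        return out

    print(join_unused_ops(['mp4', '-', 'baseline', '-', '16x9']))  # ['mp4-baseline-16x9']
    print(join_unused_ops(['mp4', '/', 'webm']))                   # ['mp4', '/', 'webm']
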
@@ -184,7 +184,7 @@ def build_fragments_list(boot_info):
 first_frag_number = fragment_run_entry_table[0]['first']
 fragments_counter = itertools.count(first_frag_number)
 for segment, fragments_count in segment_run_table['segment_run']:
-# In some live HDS streams (for example Rai), `fragments_count` is
+# In some live HDS streams (e.g. Rai), `fragments_count` is
 # abnormal and causing out-of-memory errors. It's OK to change the
 # number of fragments for live streams as they are updated periodically
 if fragments_count == 4294967295 and boot_info['live']:
@@ -365,7 +365,7 @@ class AbemaTVIE(AbemaTVBaseIE):
 # read breadcrumb on top of page
 breadcrumb = self._extract_breadcrumb_list(webpage, video_id)
 if breadcrumb:
-# breadcrumb list translates to: (example is 1st test for this IE)
+# breadcrumb list translates to: (e.g. 1st test for this IE)
 # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title)
 # hence this works
 info['series'] = breadcrumb[-2]
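
For orientation, the breadcrumb from the comment above as a Python list, showing why breadcrumb[-2] is the series name:

    breadcrumb = ['Home', 'Anime', 'Isekai Shokudo 2',
                  'Episode 1 "Cheese cakes" "Morning again"']
    print(breadcrumb[-2])  # 'Isekai Shokudo 2' -> info['series']
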
@@ -331,7 +331,7 @@ class InfoExtractor:
 playable_in_embed: Whether this video is allowed to play in embedded
 players on other sites. Can be True (=always allowed),
 False (=never allowed), None (=unknown), or a string
-specifying the criteria for embedability (Eg: 'whitelist')
+specifying the criteria for embedability; e.g. 'whitelist'
 availability: Under what condition the video is available. One of
 'private', 'premium_only', 'subscriber_only', 'needs_auth',
 'unlisted' or 'public'. Use 'InfoExtractor._availability'
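
A hypothetical snippet of the info dict an extractor might return for these two fields (values invented for illustration; the allowed values are listed above):

    info = {
        'id': 'abc123',                     # hypothetical video
        'title': 'Some subscriber-only clip',
        'playable_in_embed': False,         # never allowed in embedded players
        'availability': 'subscriber_only',  # one of the values listed above
    }
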
@@ -452,8 +452,8 @@ class InfoExtractor:
 
 _extract_from_webpage may raise self.StopExtraction() to stop further
 processing of the webpage and obtain exclusive rights to it. This is useful
-when the extractor cannot reliably be matched using just the URL.
-Eg: invidious/peertube instances
+when the extractor cannot reliably be matched using just the URL,
+e.g. invidious/peertube instances
 
 Embed-only extractors can be defined by setting _VALID_URL = False.
 
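
A rough sketch of the exclusivity mechanism described above, assuming _extract_from_webpage is a classmethod taking (url, webpage) and yielding results; the marker string and the regex are invented for illustration (check common.py for the exact conventions):

    import re

    class MyInvidiousLikeIE(InfoExtractor):
        _VALID_URL = False  # embed-only extractor, as noted above

        @classmethod
        def _extract_from_webpage(cls, url, webpage):
            # Invented marker: pretend this comment identifies the site software
            if '<!-- invidious-like -->' not in webpage:
                return
            for embed_url in re.findall(r'data-video-url="([^"]+)"', webpage):
                yield cls.url_result(embed_url)
            # Claim the page so no other extractor keeps processing it
            raise cls.StopExtraction()
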
@@ -2367,7 +2367,7 @@ class InfoExtractor:
 audio_group_id = last_stream_inf.get('AUDIO')
 # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
 # references a rendition group MUST have a CODECS attribute.
-# However, this is not always respected, for example, [2]
+# However, this is not always respected. E.g. [2]
 # contains EXT-X-STREAM-INF tag which references AUDIO
 # rendition group but does not have CODECS and despite
 # referencing an audio group it represents a complete
@@ -3003,8 +3003,8 @@ class InfoExtractor:
 segment_number += 1
 segment_time += segment_d
 elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
-# No media template
-# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
+# No media template,
+# e.g. https://www.youtube.com/watch?v=iXZV5uAYMJI
 # or any YouTube dashsegments video
 fragments = []
 segment_index = 0
@@ -3021,7 +3021,7 @@ class InfoExtractor:
 representation_ms_info['fragments'] = fragments
 elif 'segment_urls' in representation_ms_info:
 # Segment URLs with no SegmentTimeline
-# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
+# E.g. https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
 # https://github.com/ytdl-org/youtube-dl/pull/14844
 fragments = []
 segment_duration = float_or_none(
@@ -3249,8 +3249,8 @@ class InfoExtractor:
 media_tags.extend(re.findall(
 # We only allow video|audio followed by a whitespace or '>'.
 # Allowing more characters may end up in significant slow down (see
-# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
-# http://www.porntrex.com/maps/videositemap.xml).
+# https://github.com/ytdl-org/youtube-dl/issues/11979,
+# e.g. http://www.porntrex.com/maps/videositemap.xml).
 r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
 for media_tag, _, media_type, media_content in media_tags:
 media_info = {
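
A quick demonstration of the pattern above with a stand-in for _MEDIA_TAG_NAME_RE (assumed here to match only video|audio; the real constant is not shown in this hunk):

    import re

    _MEDIA_TAG_NAME_RE = r'(?:video|audio)'  # stand-in for the real constant
    webpage = '<p>intro</p><video controls>\n  <source src="clip.mp4" type="video/mp4">\n</video>'

    print(re.findall(
        r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
    # [('<video controls>', 'video', '\n  <source src="clip.mp4" type="video/mp4">\n')]
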
@@ -3706,7 +3706,7 @@ class InfoExtractor:
 desc += f'; "{cls.SEARCH_KEY}:" prefix'
 if search_examples:
 _COUNTS = ('', '5', '10', 'all')
-desc += f' (Example: "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
+desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
 if not cls.working():
 desc += ' (**Currently broken**)' if markdown else ' (Currently broken)'
 
@@ -3035,7 +3035,7 @@ class GenericIE(InfoExtractor):
 self.report_detected('Twitter card')
 if not found:
 # We look for Open Graph info:
-# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
+# We have to match any number spaces between elements, some sites try to align them, e.g.: statigr.am
 m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
 # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
 if m_video_type is not None:
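
A small check of the Open Graph pattern above; the .*? gaps are what tolerate the extra spacing mentioned in the comment:

    import re

    webpage = '<meta    property="og:video:type"     content="video/mp4" />'
    print(re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage))
    # ['mp4']
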
@@ -169,7 +169,7 @@ class PhantomJSwrapper:
 In most cases you don't need to add any `jscode`.
 It is executed in `page.onLoadFinished`.
 `saveAndExit();` is mandatory, use it instead of `phantom.exit()`
-It is possible to wait for some element on the webpage, for example:
+It is possible to wait for some element on the webpage, e.g.
 var check = function() {
 var elementFound = page.evaluate(function() {
 return document.querySelector('#b.done') !== null;
@@ -3247,9 +3247,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10
 else -1)
 # Some formats may have much smaller duration than others (possibly damaged during encoding)
-# Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
+# E.g. 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
 # Make sure to avoid false positives with small duration differences.
-# Eg: __2ABJjxzNo, ySuUZEjARPY
+# E.g. __2ABJjxzNo, ySuUZEjARPY
 is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
 if is_damaged:
 self.report_warning(
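
A worked illustration of the damaged-format check above: approxDurationMs is in milliseconds while duration is in seconds, so a healthy format yields a ratio close to 1000, and a ratio under 500 means the format covers less than roughly half of the video:

    duration = 300                   # reported video duration, seconds
    approx_duration_ms = 299_000     # healthy format: ~299 s
    print(approx_duration_ms / duration < 500)  # False -> not flagged

    approx_duration_ms = 120_000     # truncated format: only ~120 s
    print(approx_duration_ms / duration < 500)  # True  -> flagged as damaged
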
@@ -5834,7 +5834,7 @@ class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
 
 
 class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
-IE_DESC = 'YouTube music search URLs with selectable sections (Eg: #songs)'
+IE_DESC = 'YouTube music search URLs with selectable sections, e.g. #songs'
 IE_NAME = 'youtube:music:search_url'
 _VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
 _TESTS = [{
@@ -34,7 +34,7 @@ def format_text(text, f):
 '''
 @param f String representation of formatting to apply in the form:
 [style] [light] font_color [on [light] bg_color]
-Eg: "red", "bold green on light blue"
+E.g. "red", "bold green on light blue"
 '''
 f = f.upper()
 tokens = f.strip().split()
@@ -77,7 +77,7 @@ def parseOpts(overrideArguments=None, ignore_config_files='if_override'):
 if root.parse_known_args()[0].ignoreconfig:
 return False
 # Multiple package names can be given here
-# Eg: ('yt-dlp', 'youtube-dlc', 'youtube-dl') will look for
+# E.g. ('yt-dlp', 'youtube-dlc', 'youtube-dl') will look for
 # the configuration file of any of these three packages
 for package in ('yt-dlp',):
 if user:
@@ -374,7 +374,7 @@ def create_parser():
 dest='default_search', metavar='PREFIX',
 help=(
 'Use this prefix for unqualified URLs. '
-'Eg: "gvsearch2:python" downloads two videos from google videos for the search term "python". '
+'E.g. "gvsearch2:python" downloads two videos from google videos for the search term "python". '
 'Use the value "auto" to let yt-dlp guess ("auto_warning" to emit a warning when guessing). '
 '"error" just throws an error. The default value "fixup_error" repairs broken URLs, '
 'but emits an error if this is not possible instead of searching'))
@@ -459,7 +459,7 @@ def create_parser():
 help=(
 'Create aliases for an option string. Unless an alias starts with a dash "-", it is prefixed with "--". '
 'Arguments are parsed according to the Python string formatting mini-language. '
-'Eg: --alias get-audio,-X "-S=aext:{0},abr -x --audio-format {0}" creates options '
+'E.g. --alias get-audio,-X "-S=aext:{0},abr -x --audio-format {0}" creates options '
 '"--get-audio" and "-X" that takes an argument (ARG0) and expands to '
 '"-S=aext:ARG0,abr -x --audio-format ARG0". All defined aliases are listed in the --help output. '
 'Alias options can trigger more aliases; so be careful to avoid defining recursive options. '
@@ -471,8 +471,8 @@ def create_parser():
 '--proxy', dest='proxy',
 default=None, metavar='URL',
 help=(
-'Use the specified HTTP/HTTPS/SOCKS proxy. To enable SOCKS proxy, specify a proper scheme. '
-'Eg: socks5://user:pass@127.0.0.1:1080/. Pass in an empty string (--proxy "") for direct connection'))
+'Use the specified HTTP/HTTPS/SOCKS proxy. To enable SOCKS proxy, specify a proper scheme, '
+'e.g. socks5://user:pass@127.0.0.1:1080/. Pass in an empty string (--proxy "") for direct connection'))
 network.add_option(
 '--socket-timeout',
 dest='socket_timeout', type=float, default=None, metavar='SECONDS',
@@ -537,7 +537,7 @@ def create_parser():
 'Comma separated playlist_index of the videos to download. '
 'You can specify a range using "[START]:[STOP][:STEP]". For backward compatibility, START-STOP is also supported. '
 'Use negative indices to count from the right and negative STEP to download in reverse order. '
-'Eg: "-I 1:3,7,-5::2" used on a playlist of size 15 will download the videos at index 1,2,3,7,11,13,15'))
+'E.g. "-I 1:3,7,-5::2" used on a playlist of size 15 will download the videos at index 1,2,3,7,11,13,15'))
 selection.add_option(
 '--match-title',
 dest='matchtitle', metavar='REGEX',
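
A worked check of the "-I 1:3,7,-5::2" example in the help text above, for a playlist of 15 entries:

    n = 15
    # 1:3   -> items 1, 2, 3
    # 7     -> item 7
    # -5::2 -> every 2nd item starting from the 5th-from-last (index 11)
    indices = sorted({*range(1, 4), 7, *range(n - 4, n + 1, 2)})
    print(indices)  # [1, 2, 3, 7, 11, 13, 15]
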
@@ -549,17 +549,17 @@ def create_parser():
 selection.add_option(
 '--min-filesize',
 metavar='SIZE', dest='min_filesize', default=None,
-help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
+help='Do not download any videos smaller than SIZE, e.g. 50k or 44.6M')
 selection.add_option(
 '--max-filesize',
 metavar='SIZE', dest='max_filesize', default=None,
-help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
+help='Do not download any videos larger than SIZE, e.g. 50k or 44.6M')
 selection.add_option(
 '--date',
 metavar='DATE', dest='date', default=None,
 help=(
 'Download only videos uploaded on this date. The date can be "YYYYMMDD" or in the format '
-'[now|today|yesterday][-N[day|week|month|year]]. Eg: --date today-2weeks'))
+'[now|today|yesterday][-N[day|week|month|year]]. E.g. --date today-2weeks'))
 selection.add_option(
 '--datebefore',
 metavar='DATE', dest='datebefore', default=None,
@@ -589,7 +589,7 @@ def create_parser():
 'You can also simply specify a field to match if the field is present, '
 'use "!field" to check if the field is not present, and "&" to check multiple conditions. '
 'Use a "\\" to escape "&" or quotes if needed. If used multiple times, '
-'the filter matches if atleast one of the conditions are met. Eg: --match-filter '
+'the filter matches if atleast one of the conditions are met. E.g. --match-filter '
 '!is_live --match-filter "like_count>?100 & description~=\'(?i)\\bcats \\& dogs\\b\'" '
 'matches only videos that are not live OR those that have a like count more than 100 '
 '(or the like field is not available) and also has a description '
@@ -785,7 +785,7 @@ def create_parser():
 '--merge-output-format',
 action='store', dest='merge_output_format', metavar='FORMAT', default=None,
 help=(
-'Containers that may be used when merging formats, separated by "/" (Eg: "mp4/mkv"). '
+'Containers that may be used when merging formats, separated by "/", e.g. "mp4/mkv". '
 'Ignored if no merge is required. '
 f'(currently supported: {", ".join(sorted(FFmpegMergerPP.SUPPORTED_EXTS))})'))
 video_format.add_option(
@@ -825,14 +825,14 @@ def create_parser():
 subtitles.add_option(
 '--sub-format',
 action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
-help='Subtitle format; accepts formats preference, Eg: "srt" or "ass/srt/best"')
+help='Subtitle format; accepts formats preference, e.g. "srt" or "ass/srt/best"')
 subtitles.add_option(
 '--sub-langs', '--srt-langs',
 action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
 default=[], callback=_list_from_options_callback,
 help=(
-'Languages of the subtitles to download (can be regex) or "all" separated by commas. (Eg: --sub-langs "en.*,ja") '
-'You can prefix the language code with a "-" to exclude it from the requested languages. (Eg: --sub-langs all,-live_chat) '
+'Languages of the subtitles to download (can be regex) or "all" separated by commas, e.g. --sub-langs "en.*,ja". '
+'You can prefix the language code with a "-" to exclude it from the requested languages, e.g. --sub-langs all,-live_chat. '
 'Use --list-subs for a list of available language tags'))
 
 downloader = optparse.OptionGroup(parser, 'Download Options')
@@ -843,11 +843,11 @@ def create_parser():
 downloader.add_option(
 '-r', '--limit-rate', '--rate-limit',
 dest='ratelimit', metavar='RATE',
-help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
+help='Maximum download rate in bytes per second, e.g. 50K or 4.2M')
 downloader.add_option(
 '--throttled-rate',
 dest='throttledratelimit', metavar='RATE',
-help='Minimum download rate in bytes per second below which throttling is assumed and the video data is re-extracted (e.g. 100K)')
+help='Minimum download rate in bytes per second below which throttling is assumed and the video data is re-extracted, e.g. 100K')
 downloader.add_option(
 '-R', '--retries',
 dest='retries', metavar='RETRIES', default=10,
@@ -871,8 +871,8 @@ def create_parser():
 'Time to sleep between retries in seconds (optionally) prefixed by the type of retry '
 '(http (default), fragment, file_access, extractor) to apply the sleep to. '
 'EXPR can be a number, linear=START[:END[:STEP=1]] or exp=START[:END[:BASE=2]]. '
-'This option can be used multiple times to set the sleep for the different retry types. '
-'Eg: --retry-sleep linear=1::2 --retry-sleep fragment:exp=1:20'))
+'This option can be used multiple times to set the sleep for the different retry types, '
+'e.g. --retry-sleep linear=1::2 --retry-sleep fragment:exp=1:20'))
 downloader.add_option(
 '--skip-unavailable-fragments', '--no-abort-on-unavailable-fragment',
 action='store_true', dest='skip_unavailable_fragments', default=True,
@@ -892,7 +892,7 @@ def create_parser():
 downloader.add_option(
 '--buffer-size',
 dest='buffersize', metavar='SIZE', default='1024',
-help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
+help='Size of download buffer, e.g. 1024 or 16K (default is %default)')
 downloader.add_option(
 '--resize-buffer',
 action='store_false', dest='noresizebuffer',
@@ -905,7 +905,7 @@ def create_parser():
 '--http-chunk-size',
 dest='http_chunk_size', metavar='SIZE', default=None,
 help=(
-'Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). '
+'Size of a chunk for chunk-based HTTP downloading, e.g. 10485760 or 10M (default is disabled). '
 'May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)'))
 downloader.add_option(
 '--test',
@@ -963,8 +963,8 @@ def create_parser():
 help=(
 'Download only chapters whose title matches the given regular expression. '
 'Time ranges prefixed by a "*" can also be used in place of chapters to download the specified range. '
-'Eg: --download-sections "*10:15-15:00" --download-sections "intro". '
-'Needs ffmpeg. This option can be used multiple times to download multiple sections'))
+'Needs ffmpeg. This option can be used multiple times to download multiple sections, '
+'e.g. --download-sections "*10:15-15:00" --download-sections "intro"'))
 downloader.add_option(
 '--downloader', '--external-downloader',
 dest='external_downloader', metavar='[PROTO:]NAME', default={}, type='str',
@@ -978,7 +978,7 @@ def create_parser():
 'the protocols (http, ftp, m3u8, dash, rstp, rtmp, mms) to use it for. '
 f'Currently supports native, {", ".join(sorted(list_external_downloaders()))}. '
 'You can use this option multiple times to set different downloaders for different protocols. '
-'For example, --downloader aria2c --downloader "dash,m3u8:native" will use '
+'E.g. --downloader aria2c --downloader "dash,m3u8:native" will use '
 'aria2c for http/ftp downloads, and the native downloader for dash/m3u8 downloads '
 '(Alias: --external-downloader)'))
 downloader.add_option(
@@ -1188,7 +1188,7 @@ def create_parser():
 'Template for progress outputs, optionally prefixed with one of "download:" (default), '
 '"download-title:" (the console title), "postprocess:", or "postprocess-title:". '
 'The video\'s fields are accessible under the "info" key and '
-'the progress attributes are accessible under "progress" key. E.g.: '
+'the progress attributes are accessible under "progress" key. E.g. '
 # TODO: Document the fields inside "progress"
 '--console-title --progress-template "download-title:%(info.id)s-%(progress.eta)s"'))
 verbosity.add_option(
@@ -1488,7 +1488,7 @@ def create_parser():
 'Remux the video into another container if necessary '
 f'(currently supported: {", ".join(FFmpegVideoRemuxerPP.SUPPORTED_EXTS)}). '
 'If target container does not support the video/audio codec, remuxing will fail. You can specify multiple rules; '
-'Eg. "aac>m4a/mov>mp4/mkv" will remux aac to m4a, mov to mp4 and anything else to mkv'))
+'e.g. "aac>m4a/mov>mp4/mkv" will remux aac to m4a, mov to mp4 and anything else to mkv'))
 postproc.add_option(
 '--recode-video',
 metavar='FORMAT', dest='recodevideo', default=None,
@@ -1513,7 +1513,7 @@ def create_parser():
 'You can also specify "PP+EXE:ARGS" to give the arguments to the specified executable '
 'only when being used by the specified postprocessor. Additionally, for ffmpeg/ffprobe, '
 '"_i"/"_o" can be appended to the prefix optionally followed by a number to pass the argument '
-'before the specified input/output file. Eg: --ppa "Merger+ffmpeg_i1:-v quiet". '
+'before the specified input/output file, e.g. --ppa "Merger+ffmpeg_i1:-v quiet". '
 'You can use this option multiple times to give different arguments to different '
 'postprocessors. (Alias: --ppa)'))
 postproc.add_option(
@@ -1729,7 +1729,7 @@ def create_parser():
 'SponsorBlock categories to create chapters for, separated by commas. '
 f'Available categories are {", ".join(SponsorBlockPP.CATEGORIES.keys())}, all and default (=all). '
 'You can prefix the category with a "-" to exclude it. See [1] for description of the categories. '
-'Eg: --sponsorblock-mark all,-preview [1] https://wiki.sponsor.ajay.app/w/Segment_Categories'))
+'E.g. --sponsorblock-mark all,-preview [1] https://wiki.sponsor.ajay.app/w/Segment_Categories'))
 sponsorblock.add_option(
 '--sponsorblock-remove', metavar='CATS',
 dest='sponsorblock_remove', default=set(), action='callback', type='str',
@@ -610,7 +610,7 @@ def sanitize_open(filename, open_mode):
 if sys.platform == 'win32':
 import msvcrt
 
-# stdout may be any IO stream. Eg, when using contextlib.redirect_stdout
+# stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
 with contextlib.suppress(io.UnsupportedOperation):
 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
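
An illustration of the case handled above: under contextlib.redirect_stdout the replacement stream usually has no real file descriptor, so fileno() raises exactly the error being suppressed:

    import contextlib
    import io
    import sys

    with contextlib.redirect_stdout(io.StringIO()):
        try:
            sys.stdout.fileno()
        except io.UnsupportedOperation:
            caught = True  # the error that contextlib.suppress() swallows above
    print(caught)  # True
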
@@ -786,8 +786,8 @@ def _htmlentity_transform(entity_with_semicolon):
 if entity in html.entities.name2codepoint:
 return chr(html.entities.name2codepoint[entity])
 
-# TODO: HTML5 allows entities without a semicolon. For example,
-# '&Eacuteric' should be decoded as 'Éric'.
+# TODO: HTML5 allows entities without a semicolon.
+# E.g. '&Eacuteric' should be decoded as 'Éric'.
 if entity_with_semicolon in html.entities.html5:
 return html.entities.html5[entity_with_semicolon]
 
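
For reference, the stdlib already resolves the semicolon-less form mentioned in the TODO above, because html.entities.html5 contains keys both with and without the trailing semicolon for the legacy entities:

    import html
    import html.entities

    print('Eacute' in html.entities.html5, 'Eacute;' in html.entities.html5)  # True True
    print(html.unescape('&Eacuteric'))  # Éric
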