Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-10-17 07:48:36 +00:00)
commit ba80446855 (parent 22ea0688ed)
@@ -8,7 +8,7 @@ def main():
     return  # This is unused in yt-dlp
 
     parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
-    options, args = parser.parse_args()
+    _, args = parser.parse_args()
     if len(args) != 2:
         parser.error('Expected an input and an output filename')
 
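This hunk shows the pattern the commit applies throughout: a tuple-unpacking target that is never read is replaced with `_` (or given a leading underscore), the conventional Python signal that a value is intentionally discarded. A minimal, self-contained sketch of the optparse case above (the filenames are placeholders):

import optparse

def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    # `_` marks the options object as intentionally unused, which keeps
    # unused-variable linters quiet without changing behaviour
    _, args = parser.parse_args(['in.json', 'out.json'])
    if len(args) != 2:
        parser.error('Expected an input and an output filename')
    print(args)  # ['in.json', 'out.json']

main()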
@@ -75,7 +75,7 @@ dev = [
 ]
 static-analysis = [
     "autopep8~=2.0",
-    "ruff~=0.12.0",
+    "ruff~=0.13.0",
 ]
 test = [
     "pytest~=8.1",
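The dependency bump above is what appears to drive the renames in the rest of this commit: `~=` is PEP 440's compatible-release operator, so `ruff~=0.13.0` means `>=0.13.0, <0.14.0`, opting the project into the 0.13.x series and its newer lint checks for unused bindings. A quick check of the specifier semantics, using the `packaging` library:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet('~=0.13.0')
print('0.13.5' in spec)  # True: any 0.13.x release satisfies it
print('0.12.9' in spec)  # False: the older series is excluded
print('0.14.0' in spec)  # False: the next minor series is excluded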
@@ -1945,7 +1945,7 @@ def test_response_with_expected_status_returns_content(self):
         server_thread.daemon = True
         server_thread.start()
 
-        (content, urlh) = self.ie._download_webpage_handle(
+        content, _ = self.ie._download_webpage_handle(
             f'http://127.0.0.1:{port}/teapot', None,
             expected_status=TEAPOT_RESPONSE_STATUS)
         self.assertEqual(content, TEAPOT_RESPONSE_BODY)
@@ -29,7 +29,7 @@ def test_default_overwrites(self):
                 '-o', 'test.webm',
                 'https://www.youtube.com/watch?v=jNQXAC9IVRw',
             ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        sout, _ = outp.communicate()
         self.assertTrue(b'has already been downloaded' in sout)
         # if the file has no content, it has not been redownloaded
         self.assertTrue(os.path.getsize(download_file) < 1)
@@ -41,7 +41,7 @@ def test_yes_overwrites(self):
                 '-o', 'test.webm',
                 'https://www.youtube.com/watch?v=jNQXAC9IVRw',
             ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        sout, _ = outp.communicate()
         self.assertTrue(b'has already been downloaded' not in sout)
         # if the file has no content, it has not been redownloaded
         self.assertTrue(os.path.getsize(download_file) > 1)
@@ -153,7 +153,7 @@ def test_provider_unsupported_proxy_scheme(self, ie, logger, pot_request):
 
         with pytest.raises(
             PoTokenProviderRejectedRequest,
-            match='External requests by "example" provider do not support proxy scheme "socks4". Supported proxy '
+            match=r'External requests by "example" provider do not support proxy scheme "socks4"\. Supported proxy '
                   'schemes: http, socks5h',
         ):
             provider.request_pot(pot_request)
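The escaping here matters because pytest.raises() treats `match` as a regular expression (it is applied with re.search()): in the old pattern the unescaped dot matched any character, so the raw string with `\.` is the strictly correct assertion. A standalone illustration with a generic exception:

import pytest

message = 'scheme "socks4". Supported proxy schemes: http, socks5h'

# an unescaped '.' would also match e.g. 'socks4X'; r'...\.' pins a literal dot
with pytest.raises(ValueError, match=r'scheme "socks4"\. Supported'):
    raise ValueError(message)
print('pattern matched the raised message')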
@@ -22,7 +22,7 @@ def test_private_info_arg(self):
                 '--username', 'johnsmith@gmail.com',
                 '--password', 'my_secret_password',
             ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'--username' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'--password' in serr)
@@ -36,7 +36,7 @@ def test_private_info_shortarg(self):
                 '-u', 'johnsmith@gmail.com',
                 '-p', 'my_secret_password',
             ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'-u' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'-p' in serr)
@@ -50,7 +50,7 @@ def test_private_info_eq(self):
                 '--username=johnsmith@gmail.com',
                 '--password=my_secret_password',
             ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'--username' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'--password' in serr)
@@ -64,7 +64,7 @@ def test_private_info_shortarg_eq(self):
                 '-u=johnsmith@gmail.com',
                 '-p=my_secret_password',
             ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        sout, serr = outp.communicate()
+        _, serr = outp.communicate()
         self.assertTrue(b'-u' in serr)
         self.assertTrue(b'johnsmith' not in serr)
         self.assertTrue(b'-p' in serr)
@@ -149,14 +149,14 @@ def read_abst(self):
         segments_count = self.read_unsigned_char()
         segments = []
         for _ in range(segments_count):
-            box_size, box_type, box_data = self.read_box_info()
+            _box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'asrt'
             segment = FlvReader(box_data).read_asrt()
             segments.append(segment)
         fragments_run_count = self.read_unsigned_char()
         fragments = []
         for _ in range(fragments_run_count):
-            box_size, box_type, box_data = self.read_box_info()
+            _box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'afrt'
             fragments.append(FlvReader(box_data).read_afrt())
 
@@ -167,7 +167,7 @@ def read_abst(self):
         }
 
     def read_bootstrap_info(self):
-        total_size, box_type, box_data = self.read_box_info()
+        _, box_type, box_data = self.read_box_info()
         assert box_type == b'abst'
         return FlvReader(box_data).read_abst()
 
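Note the two flavours used across these FlvReader hunks: inside the loops the size is renamed to `_box_size`, which keeps the unpacking self-documenting, while in read_bootstrap_info it becomes a bare `_`. Both mark the value as unused; a sketch with a stand-in for read_box_info():

def read_box_info():
    # stand-in returning (size, type, data), as in the hunks above
    return 16, b'asrt', b'payload'

_box_size, box_type, box_data = read_box_info()  # named, but marked unused
_, box_type, box_data = read_box_info()          # fully anonymous
assert box_type == b'asrt'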
@@ -324,9 +324,9 @@ def real_download(self, filename, info_dict):
         if requested_bitrate is None or len(formats) == 1:
             # get the best format
             formats = sorted(formats, key=lambda f: f[0])
-            rate, media = formats[-1]
+            _, media = formats[-1]
         else:
-            rate, media = next(filter(
+            _, media = next(filter(
                 lambda f: int(f[0]) == requested_bitrate, formats))
 
         # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
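Both branches above select a (bitrate, media) pair and keep only the media element, which is why the rate slot can be discarded. The same logic in isolation, with illustrative stand-in values:

# formats is a list of (bitrate, media) pairs; the values are made up
formats = [(400, 'low.f4m'), (1500, 'high.f4m'), (800, 'mid.f4m')]
requested_bitrate = None

if requested_bitrate is None or len(formats) == 1:
    # best format = last element after sorting by bitrate
    _, media = sorted(formats, key=lambda f: f[0])[-1]
else:
    # first format whose bitrate matches the request exactly
    _, media = next(filter(lambda f: int(f[0]) == requested_bitrate, formats))
print(media)  # high.f4m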
@@ -1366,7 +1366,7 @@ def get_entries(page_data):
             else:
                 yield self.url_result(f'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE, entry['bvid'])
 
-        metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
+        _, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
         return self.playlist_result(paged_list, playlist_id)
 
 
@@ -1400,7 +1400,7 @@ def get_entries(page_data):
             for entry in page_data.get('data') or []:
                 yield self.url_result(f'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE, entry['id'])
 
-        metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
+        _, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
         return self.playlist_result(paged_list, playlist_id)
 
 
@@ -174,7 +174,7 @@ def _parse_js_topic_data(self, topic_data, display_id, token):
         }
 
     def _real_extract(self, url):
-        slug, display_id = self._match_valid_url(url).group('slug', 'id')
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         topic_data = self._search_json(
            r'var\s+content\s*=\s*', webpage, 'content data',
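`_match_id` is the InfoExtractor shorthand for pulling just the `(?P<id>...)` group out of `_VALID_URL`, so once the other named group is unused the simpler helper suffices (the same simplification recurs in the FifaIE and SubstackIE hunks below). The equivalent logic with plain `re`, on a made-up URL pattern:

import re

_VALID_URL = r'https?://example\.com/(?P<slug>[\w-]+)/(?P<id>\w+)'
url = 'https://example.com/some-topic/abc123'

slug, display_id = re.match(_VALID_URL, url).group('slug', 'id')  # old form
display_id = re.match(_VALID_URL, url).group('id')                # what _match_id boils down to
print(display_id)  # abc123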
@@ -272,6 +272,7 @@ def _real_extract(self, url):
         return merge_dicts(json_ld_data, {
             '_type': 'url_transparent',
             'url': embed_url,
+            'id': video_id,
             'upload_date': upload_date,
             'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')),
         })
@@ -7,7 +7,7 @@
 
 
 class FifaIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.fifa\.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)'
+    _VALID_URL = r'https?://www\.fifa\.com/fifaplus/\w{2}/watch/([^#?]+/)?(?P<id>\w+)'
     _TESTS = [{
         'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y',
         'info_dict': {
@@ -51,7 +51,7 @@ class FifaIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        video_id, locale = self._match_valid_url(url).group('id', 'locale')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         preconnect_link = self._search_regex(
@@ -129,7 +129,7 @@ class NownessSeriesIE(NownessBaseIE):
     }
 
     def _real_extract(self, url):
-        display_id, series = self._api_request(url, 'series/getBySlug/%s')
+        _, series = self._api_request(url, 'series/getBySlug/%s')
         entries = [self._extract_url_result(post) for post in series['posts']]
         series_title = None
         series_description = None
@@ -414,7 +414,7 @@ class RadioFranceProgramScheduleIE(RadioFranceBaseIE):
     _VALID_URL = rf'''(?x)
         {RadioFranceBaseIE._VALID_URL_BASE}
         /(?P<station>{RadioFranceBaseIE._STATIONS_RE})
-        /grille-programmes(?:\?date=(?P<date>[\d-]+))?
+        /grille-programmes
     '''
 
     _TESTS = [{
@@ -463,7 +463,7 @@ def _generate_playlist_entries(self, webpage_url, api_response):
             }))
 
     def _real_extract(self, url):
-        station, date = self._match_valid_url(url).group('station', 'date')
+        station = self._match_valid_url(url).group('station')
         webpage = self._download_webpage(url, station)
         grid_data = self._extract_data_from_webpage(webpage, station, 'grid')
         upload_date = strftime_or_none(grid_data.get('date'), '%Y%m%d')
@@ -321,7 +321,7 @@ def _real_extract(self, url):
                 f'Only {video_type} will be downloaded. '
                 f'To download everything from the series, remove "/{video_type}" from the URL')
 
-        series_meta, meta_paths = self._call_api(
+        series_meta, _ = self._call_api(
             f'https://api.rctiplus.com/api/v1/program/{series_id}/detail', display_id, 'Downloading series metadata')
         metadata = {
             'age_limit': try_get(series_meta, lambda x: self._AGE_RATINGS[x['age_restriction'][0]['code']]),
@@ -12,7 +12,7 @@
 
 
 class SubstackIE(InfoExtractor):
-    _VALID_URL = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://[\w-]+\.substack\.com/p/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',
         'md5': 'f27e4fc6252001d48d479f45e65cdfd5',
@@ -116,7 +116,7 @@ def _extract_video_formats(self, video_id, url):
         return formats, subtitles
 
     def _real_extract(self, url):
-        display_id, username = self._match_valid_url(url).group('id', 'username')
+        display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
 
         webpage_info = self._parse_json(self._search_json(
@@ -723,7 +723,7 @@ def _decode(self, enc):
     def _unmask_url(self, mask_url, vk_id):
         if 'audio_api_unavailable' in mask_url:
             extra = mask_url.split('?extra=')[1].split('#')
-            func, base = self._decode(extra[1]).split(chr(11))
+            _, base = self._decode(extra[1]).split(chr(11))
             mask_url = list(self._decode(extra[0]))
             url_len = len(mask_url)
             indexes = [None] * url_len
@@ -2760,7 +2760,7 @@ def extract_thread(contents, entity_payloads):
             if max_depth == 1 and parent:
                 return
 
-            max_comments, max_parents, max_replies, max_replies_per_thread, *_ = (
+            _max_comments, max_parents, max_replies, max_replies_per_thread, *_ = (
                 int_or_none(p, default=sys.maxsize) for p in self._configuration_arg('max_comments') + [''] * 4)
 
             continuation = self._extract_continuation(root_continuation_data)
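Underscore prefix aside, this line leans on two idioms worth noting: padding the configured list with [''] * 4 guarantees at least four values, so the unpacking can never raise, and *_ absorbs any surplus. A runnable sketch with a stand-in for yt-dlp's int_or_none helper and illustrative input:

import sys

def int_or_none(v, default=None):
    # minimal stand-in for yt-dlp's int_or_none
    try:
        return int(v)
    except (TypeError, ValueError):
        return default

configured = ['100', '10']  # e.g. a partially-specified max_comments setting
_max_comments, max_parents, max_replies, max_replies_per_thread, *_ = (
    int_or_none(p, default=sys.maxsize) for p in configured + [''] * 4)
print(max_parents, max_replies_per_thread)  # 10 9223372036854775807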
@@ -476,7 +476,7 @@ def _real_extract(self, url):
 
 class ZingMp3HubIE(ZingMp3BaseIE):
     IE_NAME = 'zingmp3:hub'
-    _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/(?P<regions>[^/]+)/(?P<id>[^\.]+)'
+    _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/[^/?#]+/(?P<id>[^./?#]+)'
     _TESTS = [{
         'url': 'https://zingmp3.vn/hub/Nhac-Moi/IWZ9Z0CA.html',
         'info_dict': {
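Besides dropping the unused `regions` group, the tightened character classes are a small robustness fix: [^/?#] stops a path segment at a query string or fragment as well as at a slash, and [^./?#] stops the id before the .html suffix. Checked in isolation:

import re

pattern = r'/hub/[^/?#]+/(?P<id>[^./?#]+)'
print(re.search(pattern, '/hub/Nhac-Moi/IWZ9Z0CA.html?foo=bar').group('id'))  # IWZ9Z0CA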
@@ -496,7 +496,7 @@ class ZingMp3HubIE(ZingMp3BaseIE):
     }]
 
     def _real_extract(self, url):
-        song_id, regions, url_type = self._match_valid_url(url).group('id', 'regions', 'type')
+        song_id, url_type = self._match_valid_url(url).group('id', 'type')
         hub_detail = self._call_api(url_type, {'id': song_id})
         entries = self._parse_items(traverse_obj(hub_detail, (
             'sections', lambda _, v: v['sectionId'] == 'hub', 'items', ...)))
@@ -200,7 +200,7 @@ def wrapper(self, *args, **kwargs):
 
 
 def _socket_connect(ip_addr, timeout, source_address):
-    af, socktype, proto, canonname, sa = ip_addr
+    af, socktype, proto, _canonname, sa = ip_addr
     sock = socket.socket(af, socktype, proto)
     try:
         if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
@@ -215,7 +215,7 @@ def _socket_connect(ip_addr, timeout, source_address):
 
 
 def create_socks_proxy_socket(dest_addr, proxy_args, proxy_ip_addr, timeout, source_address):
-    af, socktype, proto, canonname, sa = proxy_ip_addr
+    af, socktype, proto, _canonname, sa = proxy_ip_addr
     sock = sockssocket(af, socktype, proto)
    try:
        connect_proxy_args = proxy_args.copy()
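In both helpers the 5-tuple has the shape produced by socket.getaddrinfo(), whose canonical-name slot is rarely needed, hence the underscore prefix. The same unpacking against the real API:

import socket

# getaddrinfo() yields (family, type, proto, canonname, sockaddr) tuples;
# canonname is usually '' unless AI_CANONNAME was requested
for af, socktype, proto, _canonname, sa in socket.getaddrinfo(
        'localhost', 80, proto=socket.IPPROTO_TCP):
    print(af, socktype, proto, sa)
    break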
@@ -4770,7 +4770,7 @@ def jwt_b64encode(bytestring):
 
 # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
 def jwt_decode_hs256(jwt):
-    header_b64, payload_b64, signature_b64 = jwt.split('.')
+    _header_b64, payload_b64, _signature_b64 = jwt.split('.')
     # add trailing ='s that may have been stripped, superfluous ='s are ignored
     return json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
 
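jwt_decode_hs256() only ever reads the middle segment of the token, so the header and signature halves of the split are now explicitly marked unused; as the hunk's own comment notes, the function decodes without verifying. A usage sketch with a throwaway token (the payload encodes {"sub": "1234567890"}; the signature is deliberately junk):

import base64
import json

def jwt_decode_hs256(jwt):
    _header_b64, payload_b64, _signature_b64 = jwt.split('.')
    # add trailing ='s that may have been stripped, superfluous ='s are ignored
    return json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))

token = ('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9'
         '.eyJzdWIiOiIxMjM0NTY3ODkwIn0'
         '.not-a-real-signature')
print(jwt_decode_hs256(token))  # {'sub': '1234567890'}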