[compat] Remove more functions
Removing any more will require changes to a large number of extractors
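The change is mechanical throughout: each compat_* alias that is a plain re-export of a Python 3 standard-library name is replaced by the stdlib name at every call site, and the corresponding entry is dropped from the "from ..compat import ..." lists. A rough summary of the substitutions the diff below applies (an illustrative sketch, not the actual contents of yt_dlp/compat):

    # Sketch: compat aliases removed in this commit and their stdlib equivalents.
    import getpass
    import http.client
    import http.cookiejar
    import http.cookies
    import itertools
    import struct
    import urllib.error
    import urllib.parse
    import urllib.request

    COMPAT_TO_STDLIB = {
        'compat_chr': chr,
        'compat_getpass': getpass.getpass,
        'compat_http_client': http.client,
        'compat_cookiejar_Cookie': http.cookiejar.Cookie,
        'compat_cookies_SimpleCookie': http.cookies.SimpleCookie,
        'compat_itertools_count': itertools.count,
        'compat_struct_pack': struct.pack,
        'compat_struct_unpack': struct.unpack,
        'compat_urllib_error': urllib.error,
        'compat_urllib_parse': urllib.parse,
        'compat_urllib_parse_unquote_plus': urllib.parse.unquote_plus,
        'compat_urllib_request': urllib.request,
    }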
@@ -7,12 +7,13 @@ import json
 import re
 import struct
 import time
+import urllib.request
 import urllib.response
 import uuid
 
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
-from ..compat import compat_urllib_parse_urlparse, compat_urllib_request
+from ..compat import compat_urllib_parse_urlparse
 from ..utils import (
     ExtractorError,
     bytes_to_intlist,
@@ -33,7 +34,7 @@ def add_opener(ydl, handler):
     ''' Add a handler for opening URLs, like _download_webpage '''
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
-    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
     ydl._opener.add_handler(handler)
 
 
@@ -46,7 +47,7 @@ def remove_opener(ydl, handler):
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
     opener = ydl._opener
-    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
     if isinstance(handler, (type, tuple)):
         find_cp = lambda x: isinstance(x, handler)
     else:
@@ -96,7 +97,7 @@ def remove_opener(ydl, handler):
     opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]
 
 
-class AbemaLicenseHandler(compat_urllib_request.BaseHandler):
+class AbemaLicenseHandler(urllib.request.BaseHandler):
     handler_order = 499
     STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
     HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'
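For context on the AbemaLicenseHandler change above: urllib.request dispatches URLs to handler objects registered on an OpenerDirector, selected by a "<scheme>_open" method name and ordered by handler_order. A minimal, self-contained sketch of that mechanism (hypothetical 'echo' scheme, not the Abema license logic):

    import io
    import urllib.request
    import urllib.response


    class EchoHandler(urllib.request.BaseHandler):
        # Lower handler_order runs earlier; urllib's defaults use 500.
        handler_order = 499

        def echo_open(self, request):
            # Dispatched for 'echo://...' URLs because of the method name.
            body = io.BytesIO(request.full_url.encode())
            return urllib.response.addinfourl(body, headers=None, url=request.full_url)


    opener = urllib.request.build_opener(EchoHandler())
    print(opener.open('echo://hello').read())  # b'echo://hello'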
@@ -1,3 +1,4 @@
+import getpass
 import json
 import re
 import time
@@ -5,19 +6,15 @@ import urllib.error
 import xml.etree.ElementTree as etree
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urlparse,
-    compat_getpass
-)
+from ..compat import compat_urlparse
 from ..utils import (
-    unescapeHTML,
-    urlencode_postdata,
-    unified_timestamp,
-    ExtractorError,
     NO_DEFAULT,
+    ExtractorError,
+    unescapeHTML,
+    unified_timestamp,
+    urlencode_postdata,
 )
 
 
 MSO_INFO = {
     'DTV': {
         'name': 'DIRECTV',
@@ -1506,7 +1503,7 @@ class AdobePassIE(InfoExtractor):
                 'send_confirm_link': False,
                 'send_token': True
             }))
-            philo_code = compat_getpass('Type auth code you have received [Return]: ')
+            philo_code = getpass.getpass('Type auth code you have received [Return]: ')
             self._download_webpage(
                 'https://idp.philo.com/auth/update/login_code', video_id, 'Submitting token', data=urlencode_postdata({
                     'token': philo_code
@@ -1,36 +1,34 @@
-import re
 import json
+import re
 import urllib.parse
 
 from .common import InfoExtractor
-from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
-    compat_HTTPError
-)
+from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
+from ..compat import compat_HTTPError, compat_urllib_parse_unquote
 from ..utils import (
+    KNOWN_EXTENSIONS,
+    ExtractorError,
+    HEADRequest,
     bug_reports_message,
     clean_html,
     dict_get,
     extract_attributes,
-    ExtractorError,
     get_element_by_id,
-    HEADRequest,
     int_or_none,
     join_nonempty,
-    KNOWN_EXTENSIONS,
     merge_dicts,
     mimetype2ext,
     orderedSet,
     parse_duration,
     parse_qs,
-    str_to_int,
     str_or_none,
+    str_to_int,
     traverse_obj,
     try_get,
     unified_strdate,
     unified_timestamp,
+    url_or_none,
     urlhandle_detect_ext,
-    url_or_none
 )
 
 
@@ -143,7 +141,7 @@ class ArchiveOrgIE(InfoExtractor):
         return json.loads(extract_attributes(element)['value'])
 
     def _real_extract(self, url):
-        video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
+        video_id = urllib.parse.unquote_plus(self._match_id(url))
         identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
 
         # Archive.org metadata API doesn't clearly demarcate playlist entries
@@ -1,16 +1,12 @@
-import xml.etree.ElementTree
 import functools
 import itertools
 import json
 import re
+import urllib.error
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_error,
-    compat_urlparse,
-)
+from ..compat import compat_HTTPError, compat_str, compat_urlparse
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -391,7 +387,7 @@ class BBCCoUkIE(InfoExtractor):
                     href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                     m3u8_id=format_id, fatal=False)
             except ExtractorError as e:
-                if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
+                if not (isinstance(e.exc_info[1], urllib.error.HTTPError)
                         and e.exc_info[1].code in (403, 404)):
                     raise
                 fmts = []
@@ -1,13 +1,9 @@
 import codecs
-import re
 import json
+import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_chr,
-    compat_ord,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_ord, compat_urllib_parse_unquote
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -16,8 +12,8 @@ from ..utils import (
     multipart_encode,
     parse_duration,
     random_birthday,
-    urljoin,
     try_get,
+    urljoin,
 )
 
 
@@ -144,7 +140,7 @@ class CDAIE(InfoExtractor):
             b = []
             for c in a:
                 f = compat_ord(c)
-                b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f <= 126 else compat_chr(f))
+                b.append(chr(33 + (f + 14) % 94) if 33 <= f <= 126 else chr(f))
             a = ''.join(b)
             a = a.replace('.cda.mp4', '')
             for p in ('.2cda.pl', '.3cda.pl'):
@@ -1,11 +1,11 @@
 import itertools
 import json
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    clean_html,
     ExtractorError,
+    clean_html,
     int_or_none,
     str_to_int,
     url_or_none,
@@ -47,8 +47,8 @@ class ChingariBaseIE(InfoExtractor):
             'id': id,
             'extractor_key': ChingariIE.ie_key(),
             'extractor': 'Chingari',
-            'title': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
-            'description': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
+            'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
+            'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
             'duration': media_data.get('duration'),
             'thumbnail': url_or_none(thumbnail),
             'like_count': post_data.get('likeCount'),
@@ -1,5 +1,6 @@
 import base64
 import collections
+import getpass
 import hashlib
 import itertools
 import json
@@ -9,22 +10,20 @@ import os
 import random
 import sys
 import time
+import urllib.request
 import xml.etree.ElementTree
+import http.client
+import http.cookiejar
+import http.cookies
 
 from ..compat import functools, re  # isort: split
 from ..compat import (
-    compat_cookiejar_Cookie,
-    compat_cookies_SimpleCookie,
     compat_etree_fromstring,
     compat_expanduser,
-    compat_getpass,
-    compat_http_client,
     compat_os_name,
     compat_str,
-    compat_urllib_error,
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlencode,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..downloader import FileDownloader
@@ -671,7 +670,7 @@ class InfoExtractor:
             if hasattr(e, 'countries'):
                 kwargs['countries'] = e.countries
             raise type(e)(e.orig_msg, **kwargs)
-        except compat_http_client.IncompleteRead as e:
+        except http.client.IncompleteRead as e:
             raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
         except (KeyError, StopIteration) as e:
             raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
@@ -730,7 +729,7 @@ class InfoExtractor:
 
     @staticmethod
     def __can_accept_status_code(err, expected_status):
-        assert isinstance(err, compat_urllib_error.HTTPError)
+        assert isinstance(err, urllib.error.HTTPError)
         if expected_status is None:
            return False
        elif callable(expected_status):
@@ -739,7 +738,7 @@ class InfoExtractor:
         return err.code in variadic(expected_status)
 
     def _create_request(self, url_or_request, data=None, headers={}, query={}):
-        if isinstance(url_or_request, compat_urllib_request.Request):
+        if isinstance(url_or_request, urllib.request.Request):
             return update_Request(url_or_request, data=data, headers=headers, query=query)
         if query:
             url_or_request = update_url_query(url_or_request, query)
@@ -779,7 +778,7 @@ class InfoExtractor:
         try:
             return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
         except network_exceptions as err:
-            if isinstance(err, compat_urllib_error.HTTPError):
+            if isinstance(err, urllib.error.HTTPError):
                 if self.__can_accept_status_code(err, expected_status):
                     # Retain reference to error to prevent file object from
                     # being closed before it can be read. Works around the
@@ -807,7 +806,7 @@ class InfoExtractor:
 
         Arguments:
         url_or_request -- plain text URL as a string or
-            a compat_urllib_request.Request object
+            a urllib.request.Request object
         video_id -- Video/playlist/item identifier (string)
 
         Keyword arguments:
@@ -1056,7 +1055,7 @@ class InfoExtractor:
         while True:
             try:
                 return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
-            except compat_http_client.IncompleteRead as e:
+            except http.client.IncompleteRead as e:
                 try_count += 1
                 if try_count >= tries:
                     raise e
@@ -1292,7 +1291,7 @@ class InfoExtractor:
         if tfa is not None:
             return tfa
 
-        return compat_getpass('Type %s and press [Return]: ' % note)
+        return getpass.getpass('Type %s and press [Return]: ' % note)
 
     # Helper functions for extracting OpenGraph info
     @staticmethod
@@ -3597,15 +3596,15 @@ class InfoExtractor:
 
     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                     path='/', secure=False, discard=False, rest={}, **kwargs):
-        cookie = compat_cookiejar_Cookie(
+        cookie = http.cookiejar.Cookie(
             0, name, value, port, port is not None, domain, True,
             domain.startswith('.'), path, True, secure, expire_time,
             discard, None, None, rest)
         self.cookiejar.set_cookie(cookie)
 
     def _get_cookies(self, url):
-        """ Return a compat_cookies_SimpleCookie with the cookies for the url """
-        return compat_cookies_SimpleCookie(self._downloader._calc_cookies(url))
+        """ Return a http.cookies.SimpleCookie with the cookies for the url """
+        return http.cookies.SimpleCookie(self._downloader._calc_cookies(url))
 
     def _apply_first_set_cookie_header(self, url_handle, cookie):
         """
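A note on the _set_cookie change above: http.cookiejar.Cookie takes a long positional argument list with no defaults, which is why the call spells out every field. A standalone sketch with example values:

    import http.cookiejar

    # Positional fields, in order, mirroring the _set_cookie call above.
    cookie = http.cookiejar.Cookie(
        0,                     # version
        'session', 'abc123',   # name, value
        None, False,           # port, port_specified
        'example.com', True,   # domain, domain_specified
        False,                 # domain_initial_dot
        '/', True,             # path, path_specified
        False,                 # secure
        None,                  # expires (None = session cookie)
        False,                 # discard
        None, None, {},        # comment, comment_url, rest
    )

    jar = http.cookiejar.CookieJar()
    jar.set_cookie(cookie)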
@@ -1,19 +1,20 @@
 import base64
-import re
 import json
-import zlib
-
+import re
+import urllib.request
+import xml.etree.ElementTree
+import zlib
 from hashlib import sha1
-from math import pow, sqrt, floor
+from math import floor, pow, sqrt
 
 from .common import InfoExtractor
 from .vrv import VRVBaseIE
+from ..aes import aes_cbc_decrypt
 from ..compat import (
     compat_b64decode,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
@@ -22,8 +23,8 @@ from ..utils import (
     extract_attributes,
     float_or_none,
     format_field,
-    intlist_to_bytes,
     int_or_none,
+    intlist_to_bytes,
     join_nonempty,
     lowercase_escape,
     merge_dicts,
@@ -34,9 +35,6 @@ from ..utils import (
     try_get,
     xpath_text,
 )
-from ..aes import (
-    aes_cbc_decrypt,
-)
 
 
 class CrunchyrollBaseIE(InfoExtractor):
@@ -259,7 +257,7 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVBaseIE):
     }
 
     def _download_webpage(self, url_or_request, *args, **kwargs):
-        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
+        request = (url_or_request if isinstance(url_or_request, urllib.request.Request)
                    else sanitized_Request(url_or_request))
         # Accept-Language must be set explicitly to accept any language to avoid issues
         # similar to https://github.com/ytdl-org/youtube-dl/issues/6797.
@@ -1,7 +1,7 @@
 import base64
 import json
 import re
-import urllib
+import urllib.parse
 
 from .common import InfoExtractor
 from .adobepass import AdobePassIE
@@ -1,18 +1,18 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
 )
 from ..utils import (
+    ExtractorError,
     clean_html,
     determine_ext,
     error_to_compat_str,
-    ExtractorError,
     float_or_none,
     get_element_by_id,
     get_first,
@@ -467,7 +467,7 @@ class FacebookIE(InfoExtractor):
             dash_manifest = video.get('dash_manifest')
             if dash_manifest:
                 formats.extend(self._parse_mpd_formats(
-                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
+                    compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest))))
 
         def process_formats(formats):
             # Downloads with browser's User-Agent are rate limited. Working around
@@ -1,6 +1,6 @@
 import itertools
 import re
-import urllib
+import urllib.parse
 
 from .common import InfoExtractor
 from ..utils import (
@@ -1,10 +1,10 @@
 import random
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    int_or_none,
     float_or_none,
+    int_or_none,
     timeconvert,
     update_url_query,
     xpath_text,
@@ -66,7 +66,7 @@ class KUSIIE(InfoExtractor):
         formats = []
         for quality in quality_options:
             formats.append({
-                'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
+                'url': urllib.parse.unquote_plus(quality.attrib['url']),
                 'height': int_or_none(quality.attrib.get('height')),
                 'width': int_or_none(quality.attrib.get('width')),
                 'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),
@@ -1,17 +1,14 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_parse_qs, compat_urllib_parse_unquote
 from ..utils import (
-    determine_ext,
     ExtractorError,
-    int_or_none,
+    determine_ext,
     get_element_by_attribute,
+    int_or_none,
     mimetype2ext,
 )
@@ -143,7 +140,7 @@ class MetacafeIE(InfoExtractor):
 
         headers = {
             # Disable family filter
-            'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False}))
+            'Cookie': 'user=%s; ' % urllib.parse.quote(json.dumps({'ffilter': False}))
         }
 
         # AnyClip videos require the flashversion cookie so that we get the link
@@ -3,7 +3,6 @@ import itertools
 from .common import InfoExtractor
 from ..compat import (
     compat_b64decode,
-    compat_chr,
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
@@ -72,7 +71,7 @@ class MixcloudIE(MixcloudBaseIE):
     def _decrypt_xor_cipher(key, ciphertext):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
-            compat_chr(compat_ord(ch) ^ compat_ord(k))
+            chr(compat_ord(ch) ^ compat_ord(k))
             for ch, k in zip(ciphertext, itertools.cycle(key))])
 
     def _real_extract(self, url):
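The Mixcloud hunk keeps compat_ord but drops compat_chr; the cipher itself is a plain XOR keystream, which is its own inverse — the reason one helper both encrypts and decrypts. A standalone sketch of the same idea:

    import itertools

    def xor_cipher(key, text):
        # XOR each character with a repeating keystream; applying the same
        # function twice with the same key restores the original text.
        return ''.join(
            chr(ord(ch) ^ ord(k))
            for ch, k in zip(text, itertools.cycle(key)))

    assert xor_cipher('key', xor_cipher('key', 'some url')) == 'some url'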
@@ -1,13 +1,7 @@
+import urllib.parse
+
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote_plus
-)
-from ..utils import (
-    parse_duration,
-    remove_end,
-    unified_strdate,
-    urljoin
-)
+from ..utils import parse_duration, remove_end, unified_strdate, urljoin
 
 
 class NDTVIE(InfoExtractor):
@@ -80,7 +74,7 @@ class NDTVIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         # '__title' does not contain extra words such as sub-site name, "Video" etc.
-        title = compat_urllib_parse_unquote_plus(
+        title = urllib.parse.unquote_plus(
             self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
             or self._og_search_title(webpage))
@@ -1,14 +1,11 @@
 import itertools
 import json
 import time
-import urllib
+import urllib.parse
+import urllib.error
 
-from ..utils import (
-    ExtractorError,
-    parse_iso8601,
-    try_get,
-)
 from .common import InfoExtractor
+from ..utils import ExtractorError, parse_iso8601, try_get
 
 
 class NebulaBaseIE(InfoExtractor):
@@ -1,18 +1,12 @@
-from hashlib import md5
 import itertools
-import re
 from base64 import b64encode
 from datetime import datetime
+import re
+from hashlib import md5
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlencode,
-    compat_str,
-    compat_itertools_count,
-)
-from ..utils import (
-    sanitized_Request,
-    float_or_none,
-)
+from ..compat import compat_str, compat_urllib_parse_urlencode
+from ..utils import float_or_none, sanitized_Request
 
 
 class NetEaseMusicBaseIE(InfoExtractor):
@@ -449,7 +443,7 @@ class NetEaseMusicDjRadioIE(NetEaseMusicBaseIE):
         name = None
         desc = None
         entries = []
-        for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE):
+        for offset in itertools.count(start=0, step=self._PAGE_SIZE):
             info = self.query_api(
                 'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d'
                 % (self._PAGE_SIZE, dj_id, offset),
@@ -1,11 +1,9 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_urllib_parse,
-)
+from ..compat import compat_HTTPError
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -125,7 +123,7 @@ class PelotonIE(InfoExtractor):
 
         is_live = False
         if ride_data.get('content_format') == 'audio':
-            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), compat_urllib_parse.quote(token))
+            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), urllib.parse.quote(token))
             formats = [{
                 'url': url,
                 'ext': 'm4a',
@@ -138,9 +136,9 @@ class PelotonIE(InfoExtractor):
             url = 'https://members.onepeloton.com/.netlify/functions/m3u8-proxy?displayLanguage=en&acceptedSubtitles=%s&url=%s?hdnea=%s' % (
                 ','.join([re.sub('^([a-z]+)-([A-Z]+)$', r'\1', caption) for caption in ride_data['captions']]),
                 ride_data['vod_stream_url'],
-                compat_urllib_parse.quote(compat_urllib_parse.quote(token)))
+                urllib.parse.quote(urllib.parse.quote(token)))
         elif ride_data.get('live_stream_url'):
-            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), compat_urllib_parse.quote(token))
+            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), urllib.parse.quote(token))
             is_live = True
         else:
             raise ExtractorError('Missing video URL')
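On the double urllib.parse.quote in the Peloton hunk: the token is percent-encoded twice because it ends up inside a URL that is itself a query parameter of the proxy URL. A small illustration (hypothetical token value):

    import urllib.parse

    token = 'a/b+c'
    once = urllib.parse.quote(token)    # 'a/b%2Bc'  ('/' is safe by default)
    twice = urllib.parse.quote(once)    # 'a/b%252Bc' survives one decode intact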
@@ -1,14 +1,9 @@
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
-)
-from ..utils import (
-    clean_html,
-    ExtractorError,
-)
+from ..compat import compat_urllib_parse_unquote
+from ..utils import ExtractorError, clean_html
 
 
 class PlayvidIE(InfoExtractor):
@@ -62,7 +57,7 @@ class PlayvidIE(InfoExtractor):
             val = videovars_match.group(2)
 
             if key == 'title':
-                video_title = compat_urllib_parse_unquote_plus(val)
+                video_title = urllib.parse.unquote_plus(val)
             if key == 'duration':
                 try:
                     duration = int(val)
@@ -1,8 +1,5 @@
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_chr,
-)
+from ..compat import compat_b64decode
 from ..utils import int_or_none
 
 
@@ -50,7 +47,7 @@ class PopcorntimesIE(InfoExtractor):
             c_ord += 13
             if upper < c_ord:
                 c_ord -= 26
-            loc_b64 += compat_chr(c_ord)
+            loc_b64 += chr(c_ord)
 
         video_url = compat_b64decode(loc_b64).decode('utf-8')
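The Popcorntimes loop above appears to be a hand-rolled ROT13 over ASCII letters, so plain chr/ord is all it needs from the stdlib. An equivalent per-character sketch:

    def rot13_char(c):
        # Rotate letters by 13 positions, wrapping within each case.
        if 'a' <= c <= 'z':
            return chr((ord(c) - ord('a') + 13) % 26 + ord('a'))
        if 'A' <= c <= 'Z':
            return chr((ord(c) - ord('A') + 13) % 26 + ord('A'))
        return c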
@@ -3,29 +3,26 @@ import itertools
 import math
 import operator
 import re
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_request,
-)
 from .openload import PhantomJSwrapper
+from ..compat import compat_HTTPError, compat_str
 from ..utils import (
+    NO_DEFAULT,
+    ExtractorError,
     clean_html,
     determine_ext,
-    ExtractorError,
     format_field,
     int_or_none,
     merge_dicts,
-    NO_DEFAULT,
     orderedSet,
     remove_quotes,
     remove_start,
     str_to_int,
     update_url_query,
-    urlencode_postdata,
     url_or_none,
+    urlencode_postdata,
 )
 
 
@@ -50,7 +47,7 @@ class PornHubBaseIE(InfoExtractor):
                 r'document\.location\.reload\(true\)')):
             url_or_request = args[0]
             url = (url_or_request.get_full_url()
-                   if isinstance(url_or_request, compat_urllib_request.Request)
+                   if isinstance(url_or_request, urllib.request.Request)
                    else url_or_request)
             phantom = PhantomJSwrapper(self, required_version='2.0')
             phantom.get(url, html=webpage)
@@ -1,14 +1,12 @@
 import base64
 import io
+import struct
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_struct_unpack,
-)
+from ..compat import compat_b64decode
 from ..utils import (
-    determine_ext,
     ExtractorError,
+    determine_ext,
     float_or_none,
     qualities,
     remove_end,
@@ -73,7 +71,7 @@ class RTVEALaCartaIE(InfoExtractor):
     def _decrypt_url(png):
         encrypted_data = io.BytesIO(compat_b64decode(png)[8:])
         while True:
-            length = compat_struct_unpack('!I', encrypted_data.read(4))[0]
+            length = struct.unpack('!I', encrypted_data.read(4))[0]
             chunk_type = encrypted_data.read(4)
             if chunk_type == b'IEND':
                 break
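The RTVE hunk swaps compat_struct_unpack for struct.unpack while walking PNG chunks. For reference, a self-contained sketch of that walk under the same layout assumptions (length-prefixed chunks after the 8-byte signature):

    import io
    import struct

    def iter_png_chunks(png_bytes):
        stream = io.BytesIO(png_bytes[8:])  # skip the 8-byte PNG signature
        while True:
            # Each chunk: 4-byte big-endian length, 4-byte type, data, 4-byte CRC
            length = struct.unpack('!I', stream.read(4))[0]
            chunk_type = stream.read(4)
            data = stream.read(length)
            stream.read(4)  # CRC, not verified here
            yield chunk_type, data
            if chunk_type == b'IEND':
                break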
@@ -1,11 +1,8 @@
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_request,
-)
-from ..utils import (
-    ExtractorError,
-)
+from ..compat import compat_parse_qs
+from ..utils import ExtractorError
 
 
 class ScreencastIE(InfoExtractor):
@@ -75,7 +72,7 @@ class ScreencastIE(InfoExtractor):
                 flash_vars_s = flash_vars_s.replace(',', '&')
             if flash_vars_s:
                 flash_vars = compat_parse_qs(flash_vars_s)
-                video_url_raw = compat_urllib_request.quote(
+                video_url_raw = urllib.request.quote(
                     flash_vars['content'][0])
                 video_url = video_url_raw.replace('http%3A', 'http:')
@@ -1,14 +1,15 @@
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_urllib_parse_unquote_plus,
-)
+from ..compat import compat_b64decode
 from ..utils import (
-    determine_ext,
+    KNOWN_EXTENSIONS,
     ExtractorError,
+    determine_ext,
     int_or_none,
     js_to_json,
-    KNOWN_EXTENSIONS,
     parse_filesize,
     rot47,
     url_or_none,
@@ -130,7 +131,7 @@ class VivoIE(SharedBaseIE):
             return stream_url
 
         def decode_url(encoded_url):
-            return rot47(compat_urllib_parse_unquote_plus(encoded_url))
+            return rot47(urllib.parse.unquote_plus(encoded_url))
 
         return decode_url(self._parse_json(
             self._search_regex(
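rot47 (imported from ..utils above) shifts each printable ASCII character by 47 within the 94-character range 33..126, making it self-inverse like ROT13. A sketch matching that behaviour:

    def rot47(s):
        return ''.join(
            chr(33 + (ord(c) + 14) % 94) if 33 <= ord(c) <= 126 else c
            for c in s)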
@@ -1,16 +1,12 @@
 import re
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_HTTPError, compat_str, compat_urlparse
 from ..utils import (
+    ExtractorError,
     determine_ext,
     extract_attributes,
-    ExtractorError,
     float_or_none,
     int_or_none,
     js_to_json,
@@ -155,7 +151,7 @@ class UdemyIE(InfoExtractor):
             headers['X-Udemy-Bearer-Token'] = cookie.value
             headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
 
-        if isinstance(url_or_request, compat_urllib_request.Request):
+        if isinstance(url_or_request, urllib.request.Request):
             for header, value in headers.items():
                 url_or_request.add_header(header, value)
         else:
@@ -1,10 +1,9 @@
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
-from ..utils import (
-    unified_strdate,
-)
+from ..utils import unified_strdate
 
 
 class UrortIE(InfoExtractor):
@@ -31,7 +30,7 @@ class UrortIE(InfoExtractor):
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
 
-        fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
+        fstr = urllib.parse.quote("InternalBandUrl eq '%s'" % playlist_id)
         json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
         songs = self._download_json(json_url, playlist_id)
         entries = []
@@ -1,8 +1,10 @@
 import random
 import re
 import string
+import struct
 
 from .common import InfoExtractor
+from ..compat import compat_b64decode, compat_ord
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -14,11 +16,6 @@ from ..utils import (
     xpath_element,
     xpath_text,
 )
-from ..compat import (
-    compat_b64decode,
-    compat_ord,
-    compat_struct_pack,
-)
 
 
 class VideaIE(InfoExtractor):
@@ -102,7 +99,7 @@ class VideaIE(InfoExtractor):
             j = (j + S[i]) % 256
             S[i], S[j] = S[j], S[i]
             k = S[(S[i] + S[j]) % 256]
-            res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m]))
+            res += struct.pack('B', k ^ compat_ord(cipher_text[m]))
 
         return res.decode()
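The Videa hunk above is the tail of an inline RC4 implementation; only struct.pack and compat_ord change. For orientation, a complete sketch of RC4 in the same style (bytes in, bytes out):

    import struct

    def rc4(cipher_text, key):
        # Key-scheduling algorithm (KSA)
        S = list(range(256))
        j = 0
        for i in range(256):
            j = (j + S[i] + key[i % len(key)]) % 256
            S[i], S[j] = S[j], S[i]
        # Pseudo-random generation algorithm (PRGA), as in the hunk above
        res = b''
        i = j = 0
        for m in range(len(cipher_text)):
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            k = S[(S[i] + S[j]) % 256]
            res += struct.pack('B', k ^ cipher_text[m])
        return res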
@@ -1,17 +1,14 @@
 import base64
-import json
 import hashlib
 import hmac
+import json
 import random
 import string
 import time
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_urllib_parse_urlencode,
-    compat_urllib_parse,
-)
+from ..compat import compat_HTTPError, compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -46,12 +43,12 @@ class VRVBaseIE(InfoExtractor):
             headers['Content-Type'] = 'application/json'
         base_string = '&'.join([
             'POST' if data else 'GET',
-            compat_urllib_parse.quote(base_url, ''),
-            compat_urllib_parse.quote(encoded_query, '')])
+            urllib.parse.quote(base_url, ''),
+            urllib.parse.quote(encoded_query, '')])
         oauth_signature = base64.b64encode(hmac.new(
             (self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'),
             base_string.encode(), hashlib.sha1).digest()).decode()
-        encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
+        encoded_query += '&oauth_signature=' + urllib.parse.quote(oauth_signature, '')
         try:
             return self._download_json(
                 '?'.join([base_url, encoded_query]), video_id,
@@ -1,11 +1,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_chr
-from ..utils import (
-    decode_packed_codes,
-    ExtractorError,
-)
+from ..utils import ExtractorError, decode_packed_codes
 
 
 class VShareIE(InfoExtractor):
@@ -37,7 +33,7 @@ class VShareIE(InfoExtractor):
         digits = [int(digit) for digit in digits.split(',')]
         key_digit = self._search_regex(
             r'fromCharCode\(.+?(\d+)\)}', unpacked, 'key digit')
-        chars = [compat_chr(d - int(key_digit)) for d in digits]
+        chars = [chr(d - int(key_digit)) for d in digits]
         return ''.join(chars)
 
     def _real_extract(self, url):
@@ -1,11 +1,10 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_chr
 from ..utils import (
+    ExtractorError,
     decode_packed_codes,
     determine_ext,
-    ExtractorError,
     int_or_none,
     js_to_json,
     urlencode_postdata,
@@ -32,11 +31,11 @@ def aa_decode(aa_code):
         aa_char = aa_char.replace('+ ', '')
         m = re.match(r'^\d+', aa_char)
         if m:
-            ret += compat_chr(int(m.group(0), 8))
+            ret += chr(int(m.group(0), 8))
         else:
             m = re.match(r'^u([\da-f]+)', aa_char)
             if m:
-                ret += compat_chr(int(m.group(1), 16))
+                ret += chr(int(m.group(1), 16))
     return ret
@@ -1,15 +1,15 @@
 import hashlib
 import itertools
 import re
+import urllib.parse
 
+from .brightcove import BrightcoveNewIE
 from .common import InfoExtractor, SearchInfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse,
-)
+from .youtube import YoutubeIE
+from ..compat import compat_str
 from ..utils import (
-    clean_html,
     ExtractorError,
+    clean_html,
     int_or_none,
     mimetype2ext,
     parse_iso8601,
@@ -18,9 +18,6 @@ from ..utils import (
     url_or_none,
 )
 
-from .brightcove import BrightcoveNewIE
-from .youtube import YoutubeIE
-
 
 class YahooIE(InfoExtractor):
     IE_DESC = 'Yahoo screen and movies'
@@ -333,7 +330,7 @@ class YahooSearchIE(SearchInfoExtractor):
 
     def _search_results(self, query):
        for pagenum in itertools.count(0):
-            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (urllib.parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
                 note='Downloading results page ' + str(pagenum + 1))
             yield from (self.url_result(result['rurl']) for result in info['results'])
@@ -1,8 +1,8 @@
-import re
 import json
+import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 
 
 class YnetIE(InfoExtractor):
@@ -31,7 +31,7 @@ class YnetIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        content = compat_urllib_parse_unquote_plus(self._og_search_video_url(webpage))
+        content = urllib.parse.unquote_plus(self._og_search_video_url(webpage))
         config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
         f4m_url = config['clip']['url']
         title = self._og_search_title(webpage)
@@ -13,15 +13,14 @@ import sys
 import threading
 import time
 import traceback
+import urllib.parse
 
 from .common import InfoExtractor, SearchInfoExtractor
 from ..compat import functools  # isort: split
 from ..compat import (
-    compat_chr,
     compat_HTTPError,
     compat_parse_qs,
     compat_str,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
@@ -2483,7 +2482,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if code:
             res = self._parse_sig_js(code)
 
-            test_string = ''.join(map(compat_chr, range(len(example_sig))))
+            test_string = ''.join(map(chr, range(len(example_sig))))
             cache_res = res(test_string)
             cache_spec = [ord(c) for c in cache_res]
 
@@ -2522,7 +2521,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             else:
                 yield _genslice(start, i, step)
 
-        test_string = ''.join(map(compat_chr, range(len(example_sig))))
+        test_string = ''.join(map(chr, range(len(example_sig))))
         cache_res = func(test_string)
         cache_spec = [ord(c) for c in cache_res]
         expr_code = ' + '.join(gen_sig_code(cache_spec))
@@ -3421,7 +3420,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             # fields may contain comma as well (see
             # https://github.com/ytdl-org/youtube-dl/issues/8536)
             feed_data = compat_parse_qs(
-                compat_urllib_parse_unquote_plus(feed))
+                urllib.parse.unquote_plus(feed))
 
             def feed_entry(name):
                 return try_get(
@@ -5846,7 +5845,7 @@ class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
         if params:
             section = next((k for k, v in self._SECTIONS.items() if v == params), params)
         else:
-            section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
+            section = urllib.parse.unquote_plus((url.split('#') + [''])[1]).lower()
             params = self._SECTIONS.get(section)
             if not params:
                 section = None
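About the test_string in the YouTube signature hunks: building a probe whose i-th character has code i means the scrambled output can be read back as a permutation of indices, which is what cache_spec stores. A toy illustration (stand-in scramble function, not the real JS signature code):

    example_sig = 'abcdef'  # stand-in signature
    test_string = ''.join(map(chr, range(len(example_sig))))

    def scramble(s):        # stand-in for the extracted JS function
        return s[::-1]

    cache_spec = [ord(c) for c in scramble(test_string)]
    assert cache_spec == [5, 4, 3, 2, 1, 0]  # which input index each output came from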