Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-12-15 20:48:58 +00:00)
[cleanup] Standardize `import datetime as dt` (#8978)
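The change is mechanical throughout: every `import datetime` becomes `import datetime as dt`, and call sites shrink to match. The one non-trivial consequence is that a local variable named `dt` would now shadow the module alias, so such locals are renamed to `dt_` where they occur (see the JoqrAgIE and YoutubeBaseInfoExtractor hunks below). A minimal illustrative sketch of the convention, with names of my own rather than from the diff:

import datetime as dt  # the spelling this commit standardizes


def to_upload_date(ts):
    # A local named `dt` would shadow the module alias here,
    # which is why such locals become `dt_` in this commit.
    dt_ = dt.datetime.fromtimestamp(ts, dt.timezone.utc)
    return dt_.strftime('%Y%m%d')


now = dt.datetime.now(dt.timezone.utc)
print(to_upload_date(now.timestamp()), now + dt.timedelta(minutes=5))
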
yt_dlp/extractor/atvat.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt

 from .common import InfoExtractor
 from ..utils import (
@@ -71,9 +71,9 @@ class ATVAtIE(InfoExtractor):
         content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']}
                        for id, content in enumerate(contentResource)]

-        time_of_request = datetime.datetime.now()
-        not_before = time_of_request - datetime.timedelta(minutes=5)
-        expire = time_of_request + datetime.timedelta(minutes=5)
+        time_of_request = dt.datetime.now()
+        not_before = time_of_request - dt.timedelta(minutes=5)
+        expire = time_of_request + dt.timedelta(minutes=5)
         payload = {
             'content_ids': {
                 content_id: content_ids,

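The lines touched here build a plus-or-minus five minute validity window around the request time, presumably so the signed payload tolerates moderate clock skew between client and server. The arithmetic in isolation (a sketch, not the extractor's full payload):

import datetime as dt

time_of_request = dt.datetime.now()
not_before = time_of_request - dt.timedelta(minutes=5)
expire = time_of_request + dt.timedelta(minutes=5)
assert not_before < time_of_request < expire
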
yt_dlp/extractor/aws.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import hashlib
 import hmac

@@ -12,7 +12,7 @@ class AWSIE(InfoExtractor): # XXX: Conventionally, base classes should end with

     def _aws_execute_api(self, aws_dict, video_id, query=None):
         query = query or {}
-        amz_date = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
+        amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
         date = amz_date[:8]
         headers = {
             'Accept': 'application/json',

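SigV4 signing needs the same instant in two shapes: the full `X-Amz-Date` value and its eight-character date prefix for the credential scope. A sketch of the derivation this hunk performs (the scope below is the standard SigV4 layout with placeholder region and service, not values from this file):

import datetime as dt

amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')  # e.g. '20240401T120000Z'
date = amz_date[:8]                                                     # e.g. '20240401'
credential_scope = f'{date}/us-east-1/execute-api/aws4_request'
print(amz_date, credential_scope)
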
yt_dlp/extractor/cda.py
@@ -1,6 +1,6 @@
 import base64
 import codecs
-import datetime
+import datetime as dt
 import hashlib
 import hmac
 import json
@@ -134,7 +134,7 @@ class CDAIE(InfoExtractor):
         self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version {app_version}; Android {android_version}; {phone_model})'

         cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {}
-        if cached_bearer.get('valid_until', 0) > datetime.datetime.now().timestamp() + 5:
+        if cached_bearer.get('valid_until', 0) > dt.datetime.now().timestamp() + 5:
             self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}'
             return

@@ -154,7 +154,7 @@ class CDAIE(InfoExtractor):
         })
         self.cache.store(self._BEARER_CACHE, username, {
             'token': token_res['access_token'],
-            'valid_until': token_res['expires_in'] + datetime.datetime.now().timestamp(),
+            'valid_until': token_res['expires_in'] + dt.datetime.now().timestamp(),
         })
         self._API_HEADERS['Authorization'] = f'Bearer {token_res["access_token"]}'

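The two CDA hunks are the two halves of a bearer-token cache: on store, the relative `expires_in` is converted to an absolute `valid_until`; on load, the token is trusted only if it stays valid for at least five more seconds, so it cannot expire between the check and the request that uses it. A reduced sketch with a plain dict standing in for yt-dlp's cache:

import datetime as dt

_cache = {}

def store_token(token, expires_in):
    # An absolute expiry survives across runs; a relative expires_in would not.
    _cache['bearer'] = {'token': token, 'valid_until': expires_in + dt.datetime.now().timestamp()}

def load_token():
    cached = _cache.get('bearer') or {}
    # The +5 mirrors the hunk above: a small safety margin before expiry.
    if cached.get('valid_until', 0) > dt.datetime.now().timestamp() + 5:
        return cached['token']
    return None
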
yt_dlp/extractor/goplay.py
@@ -1,6 +1,6 @@
 import base64
 import binascii
-import datetime
+import datetime as dt
 import hashlib
 import hmac
 import json
@@ -422,7 +422,7 @@ class AwsIdp:
         months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
         days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

-        time_now = datetime.datetime.now(datetime.timezone.utc)
+        time_now = dt.datetime.now(dt.timezone.utc)
         format_string = "{} {} {} %H:%M:%S UTC %Y".format(days[time_now.weekday()], months[time_now.month], time_now.day)
         time_string = time_now.strftime(format_string)
         return time_string

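Two details of the AwsIdp hunk are easy to miss: `months` starts with `None` so the 1-based `time_now.month` can index it directly, and the hand-rolled tables exist because `%a`/`%b` in `strftime` are locale-dependent, while this timestamp (likely consumed by the Cognito SRP handshake) needs fixed English names. Demonstrated standalone:

import datetime as dt

months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

time_now = dt.datetime.now(dt.timezone.utc)
# weekday() is 0-based (Monday == 0); month is 1-based, hence the None placeholder
format_string = '{} {} {} %H:%M:%S UTC %Y'.format(days[time_now.weekday()], months[time_now.month], time_now.day)
print(time_now.strftime(format_string))  # e.g. 'Mon Apr 1 12:00:00 UTC 2024'
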
yt_dlp/extractor/joqrag.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import urllib.parse

 from .common import InfoExtractor
@@ -50,8 +50,8 @@ class JoqrAgIE(InfoExtractor):

     def _extract_start_timestamp(self, video_id, is_live):
         def extract_start_time_from(date_str):
-            dt = datetime_from_str(date_str) + datetime.timedelta(hours=9)
-            date = dt.strftime('%Y%m%d')
+            dt_ = datetime_from_str(date_str) + dt.timedelta(hours=9)
+            date = dt_.strftime('%Y%m%d')
             start_time = self._search_regex(
                 r'<h3[^>]+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+–\s*(\d{1,2}:\d{1,2})',
                 self._download_webpage(
@@ -60,7 +60,7 @@ class JoqrAgIE(InfoExtractor):
                     errnote=f'Failed to download program list of {date}') or '',
                 'start time', default=None)
             if start_time:
-                return unified_timestamp(f'{dt.strftime("%Y/%m/%d")} {start_time} +09:00')
+                return unified_timestamp(f'{dt_.strftime("%Y/%m/%d")} {start_time} +09:00')
             return None

         start_timestamp = extract_start_time_from('today')
@@ -87,7 +87,7 @@ class JoqrAgIE(InfoExtractor):
             msg = 'This stream is not currently live'
             if release_timestamp:
                 msg += (' and will start at '
-                        + datetime.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
+                        + dt.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
             self.raise_no_formats(msg, expected=True)
         else:
             m3u8_path = self._search_regex(

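This is the first hunk where the new alias collides with an existing local: the inner helper kept a datetime shifted nine hours into JST in a variable called `dt`, which would now shadow the module, so it becomes `dt_`. The collision and the offset in isolation (plain stdlib instead of yt-dlp's `datetime_from_str`):

import datetime as dt

def jst_date(moment):
    # `moment` is a naive UTC datetime; +9 hours gives Japan Standard Time.
    # The local must not be called `dt`, or the module alias becomes unreachable.
    dt_ = moment + dt.timedelta(hours=9)
    return dt_.strftime('%Y/%m/%d')

print(jst_date(dt.datetime.now(dt.timezone.utc).replace(tzinfo=None)))
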
yt_dlp/extractor/leeco.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import hashlib
 import re
 import time
@@ -185,7 +185,7 @@ class LeIE(InfoExtractor):

         publish_time = parse_iso8601(self._html_search_regex(
             r'发布时间 ([^<>]+) ', page, 'publish time', default=None),
-            delimiter=' ', timezone=datetime.timedelta(hours=8))
+            delimiter=' ', timezone=dt.timedelta(hours=8))
         description = self._html_search_meta('description', page, fatal=False)

         return {

yt_dlp/extractor/motherless.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import re
 import urllib.parse

@@ -151,7 +151,7 @@ class MotherlessIE(InfoExtractor):
                 'd': 'days',
             }
             kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
-            upload_date = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
+            upload_date = (dt.datetime.now(dt.timezone.utc) - dt.timedelta(**kwargs)).strftime('%Y%m%d')

         comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
         uploader_id = self._html_search_regex(

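The Motherless hunk sits inside relative-age parsing: the trailing letter of a string like '3d' selects a `timedelta` keyword through a unit map, which is then applied with `**kwargs`. Self-contained, with `_AGO_UNITS` abbreviated from the surrounding context:

import datetime as dt

_AGO_UNITS = {'h': 'hours', 'd': 'days'}

def upload_date_from_ago(uploaded_ago):
    delta = int(uploaded_ago[:-1])
    kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}  # '3d' -> {'days': 3}
    return (dt.datetime.now(dt.timezone.utc) - dt.timedelta(**kwargs)).strftime('%Y%m%d')

print(upload_date_from_ago('3d'))
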
yt_dlp/extractor/niconico.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import functools
 import itertools
 import json
@@ -819,12 +819,12 @@ class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
         'playlist_mincount': 1610,
     }]

-    _START_DATE = datetime.date(2007, 1, 1)
+    _START_DATE = dt.date(2007, 1, 1)
     _RESULTS_PER_PAGE = 32
     _MAX_PAGES = 50

     def _entries(self, url, item_id, start_date=None, end_date=None):
-        start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
+        start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()

         # If the last page has a full page of videos, we need to break down the query interval further
         last_page_len = len(list(self._get_entries_for_date(

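The comment in this hunk carries the interesting logic: the search API caps results per query, so results are fetched per date interval, and a full final page is taken to mean the cap was hit, at which point the interval is bisected and each half queried again. Schematically, with a stand-in `search(start, end)` in place of the real API call:

import datetime as dt

RESULTS_CAP = 32 * 50  # stand-in for _RESULTS_PER_PAGE * _MAX_PAGES

def entries(search, start_date, end_date):
    results = search(start_date, end_date)
    if len(results) < RESULTS_CAP or start_date == end_date:
        yield from results
        return
    # Cap hit: bisect the date interval and query each half separately.
    midpoint = start_date + (end_date - start_date) // 2
    yield from entries(search, start_date, midpoint)
    yield from entries(search, midpoint + dt.timedelta(days=1), end_date)

print(list(entries(lambda s, e: [], dt.date(2007, 1, 1), dt.date(2007, 12, 31))))  # -> []
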
yt_dlp/extractor/panopto.py
@@ -1,5 +1,5 @@
 import calendar
-import datetime
+import datetime as dt
 import functools
 import json
 import random
@@ -243,7 +243,7 @@ class PanoptoIE(PanoptoBaseIE):
         invocation_id = delivery_info.get('InvocationId')
         stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str)
         if invocation_id and stream_id and duration:
-            timestamp_str = f'/Date({calendar.timegm(datetime.datetime.now(datetime.timezone.utc).timetuple())}000)/'
+            timestamp_str = f'/Date({calendar.timegm(dt.datetime.now(dt.timezone.utc).timetuple())}000)/'
             data = {
                 'streamRequests': [
                     {

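The value built here is the legacy ASP.NET JSON date form, `/Date(<milliseconds since the Unix epoch>)/`. `calendar.timegm` interprets the struct_time as UTC (unlike `time.mktime`, which assumes local time), and appending the literal '000' turns whole seconds into milliseconds:

import calendar
import datetime as dt

seconds = calendar.timegm(dt.datetime.now(dt.timezone.utc).timetuple())  # integer epoch seconds
timestamp_str = f'/Date({seconds}000)/'  # seconds * 1000, done as a string suffix
print(timestamp_str)  # e.g. '/Date(1704067200000)/'
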
yt_dlp/extractor/pr0gramm.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import json
 import urllib.parse

@@ -197,7 +197,7 @@ class Pr0grammIE(InfoExtractor):
                 'like_count': ('up', {int}),
                 'dislike_count': ('down', {int}),
                 'timestamp': ('created', {int}),
-                'upload_date': ('created', {int}, {datetime.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
+                'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
                 'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)})
             }),
         }

yt_dlp/extractor/rokfin.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import itertools
 import json
 import re
@@ -156,7 +156,7 @@ class RokfinIE(InfoExtractor):
             self.raise_login_required('This video is only available to premium users', True, method='cookies')
         elif scheduled:
             self.raise_no_formats(
-                f'Stream is offline; scheduled for {datetime.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
+                f'Stream is offline; scheduled for {dt.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
                 video_id=video_id, expected=True)

         uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))

yt_dlp/extractor/sejm.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt

 from .common import InfoExtractor
 from .redge import RedCDNLivxIE
@@ -13,16 +13,16 @@ from ..utils.traversal import traverse_obj


 def is_dst(date):
-    last_march = datetime.datetime(date.year, 3, 31)
-    last_october = datetime.datetime(date.year, 10, 31)
-    last_sunday_march = last_march - datetime.timedelta(days=last_march.isoweekday() % 7)
-    last_sunday_october = last_october - datetime.timedelta(days=last_october.isoweekday() % 7)
+    last_march = dt.datetime(date.year, 3, 31)
+    last_october = dt.datetime(date.year, 10, 31)
+    last_sunday_march = last_march - dt.timedelta(days=last_march.isoweekday() % 7)
+    last_sunday_october = last_october - dt.timedelta(days=last_october.isoweekday() % 7)
     return last_sunday_march.replace(hour=2) <= date <= last_sunday_october.replace(hour=3)


 def rfc3339_to_atende(date):
-    date = datetime.datetime.fromisoformat(date)
-    date = date + datetime.timedelta(hours=1 if is_dst(date) else 0)
+    date = dt.datetime.fromisoformat(date)
+    date = date + dt.timedelta(hours=1 if is_dst(date) else 0)
     return int((date.timestamp() - 978307200) * 1000)

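The magic constant explains itself once decoded: 978307200 is the Unix timestamp of 2001-01-01T00:00:00Z, so the 'atende' value is milliseconds elapsed since a 2001 epoch, after the manual CET/CEST adjustment approximated by `is_dst`. A worked check of the constant and the conversion (DST handling omitted):

import datetime as dt

assert dt.datetime(2001, 1, 1, tzinfo=dt.timezone.utc).timestamp() == 978307200

def to_atende(date):
    # milliseconds elapsed since the 2001-01-01 UTC epoch
    return int((date.timestamp() - 978307200) * 1000)

print(to_atende(dt.datetime(2024, 1, 1, tzinfo=dt.timezone.utc)))  # 725760000000
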
yt_dlp/extractor/sonyliv.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import itertools
 import json
 import math
@@ -94,7 +94,7 @@ class SonyLIVIE(InfoExtractor):
                 'mobileNumber': username,
                 'channelPartnerID': 'MSMIND',
                 'country': 'IN',
-                'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
+                'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
                 'otpSize': 6,
                 'loginType': 'REGISTERORSIGNIN',
                 'isMobileMandatory': True,
@@ -111,7 +111,7 @@ class SonyLIVIE(InfoExtractor):
                 'otp': self._get_tfa_info('OTP'),
                 'dmaId': 'IN',
                 'ageConfirmation': True,
-                'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
+                'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
                 'isMobileMandatory': True,
             }).encode())
         if otp_verify_json['resultCode'] == 'KO':

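A quirk this rename deliberately preserves: in `'%Y-%m-%dT%H:%M:%S.%MZ'`, the trailing `%M` is strftime's minutes directive again, not milliseconds (Python has no milliseconds code; a fractional-seconds field would normally come from `%f`, truncated). For comparison only, not a change this commit makes:

import datetime as dt

now = dt.datetime(2024, 4, 1, 12, 34, 56, 789000)
print(now.strftime('%Y-%m-%dT%H:%M:%S.%MZ'))            # '2024-04-01T12:34:56.34Z' (minutes repeated)
print(now.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z')  # '2024-04-01T12:34:56.789Z' (true milliseconds)
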
yt_dlp/extractor/tenplay.py
@@ -1,5 +1,5 @@
 import base64
-import datetime
+import datetime as dt
 import functools
 import itertools

@@ -70,7 +70,7 @@ class TenPlayIE(InfoExtractor):
         username, password = self._get_login_info()
         if username is None or password is None:
             self.raise_login_required('Your 10play account\'s details must be provided with --username and --password.')
-        _timestamp = datetime.datetime.now().strftime('%Y%m%d000000')
+        _timestamp = dt.datetime.now().strftime('%Y%m%d000000')
         _auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii')
         data = self._download_json('https://10play.com.au/api/user/auth', video_id, 'Getting bearer token', headers={
             'X-Network-Ten-Auth': _auth_header,

yt_dlp/extractor/youtube.py
@@ -2,7 +2,7 @@ import base64
 import calendar
 import collections
 import copy
-import datetime
+import datetime as dt
 import enum
 import hashlib
 import itertools
@@ -924,10 +924,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     def _parse_time_text(self, text):
         if not text:
             return
-        dt = self.extract_relative_time(text)
+        dt_ = self.extract_relative_time(text)
         timestamp = None
-        if isinstance(dt, datetime.datetime):
-            timestamp = calendar.timegm(dt.timetuple())
+        if isinstance(dt_, dt.datetime):
+            timestamp = calendar.timegm(dt_.timetuple())

         if timestamp is None:
             timestamp = (
@@ -4568,7 +4568,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

         if upload_date and live_status not in ('is_live', 'post_live', 'is_upcoming'):
             # Newly uploaded videos' HLS formats are potentially problematic and need to be checked
-            upload_datetime = datetime_from_str(upload_date).replace(tzinfo=datetime.timezone.utc)
+            upload_datetime = datetime_from_str(upload_date).replace(tzinfo=dt.timezone.utc)
             if upload_datetime >= datetime_from_str('today-2days'):
                 for fmt in info['formats']:
                     if fmt.get('protocol') == 'm3u8_native':

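The `_parse_time_text` rename is the clearest motivation for `dt_` anywhere in this commit: with the module imported as `dt`, the old `isinstance(dt, datetime.datetime)` would have become `isinstance(dt, dt.datetime)`, where both names are the local, an attribute lookup on a datetime instance that raises `AttributeError`. A minimal reproduction of the failure the rename avoids:

import datetime as dt

def parse(text):
    # hypothetical stand-in for extract_relative_time()
    return dt.datetime.fromisoformat(text)

def broken(text):
    dt = parse(text)  # local now shadows the module alias...
    return isinstance(dt, dt.datetime)  # ...AttributeError: no `.datetime` on the instance

def fixed(text):
    dt_ = parse(text)
    return isinstance(dt_, dt.datetime)  # module alias still reachable

print(fixed('2024-04-01T00:00:00'))
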