Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-08-16 01:18:29 +00:00)
Merge branch 'yt-dlp:master' into generic_tests
Commit 1ca3c53479
.github/workflows/build.yml (vendored, 4 changes)

@@ -242,7 +242,7 @@ jobs:
     permissions:
       contents: read
       actions: write  # For cleaning up cache
-    runs-on: macos-13
+    runs-on: macos-14

     steps:
       - uses: actions/checkout@v4
@@ -261,6 +261,8 @@ jobs:
       - name: Install Requirements
         run: |
           brew install coreutils
+          # We need to use system Python in order to roll our own universal2 curl_cffi wheel
+          brew uninstall --ignore-dependencies python3
           python3 -m venv ~/yt-dlp-build-venv
           source ~/yt-dlp-build-venv/bin/activate
           python3 devscripts/install_deps.py -o --include build
.github/workflows/core.yml (vendored, 4 changes)

@@ -37,7 +37,7 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         # CPython 3.9 is in quick-test
-        python-version: ['3.10', '3.11', '3.12', '3.13', pypy-3.10]
+        python-version: ['3.10', '3.11', '3.12', '3.13', pypy-3.11]
         include:
           # atleast one of each CPython/PyPy tests must be in windows
           - os: windows-latest
@@ -49,7 +49,7 @@ jobs:
           - os: windows-latest
             python-version: '3.13'
           - os: windows-latest
-            python-version: pypy-3.10
+            python-version: pypy-3.11
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
.github/workflows/download.yml (vendored, 4 changes)

@@ -28,13 +28,13 @@ jobs:
       fail-fast: true
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.10', '3.11', '3.12', '3.13', pypy-3.10]
+        python-version: ['3.10', '3.11', '3.12', '3.13', pypy-3.11]
         include:
           # atleast one of each CPython/PyPy tests must be in windows
           - os: windows-latest
             python-version: '3.9'
           - os: windows-latest
-            python-version: pypy-3.10
+            python-version: pypy-3.11
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
.github/workflows/signature-tests.yml (vendored, 2 changes)

@@ -25,7 +25,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest]
-        python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', pypy-3.10, pypy-3.11]
+        python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', pypy-3.11]
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
CONTRIBUTING.md

@@ -272,7 +272,7 @@ ## Adding support for a new site

 You can use `hatch fmt` to automatically fix problems. Rules that the linter/formatter enforces should not be disabled with `# noqa` unless a maintainer requests it. The only exception allowed is for old/printf-style string formatting in GraphQL query templates (use `# noqa: UP031`).

-1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython >=3.9 and PyPy >=3.10. Backward compatibility is not required for even older versions of Python.
+1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython >=3.9 and PyPy >=3.11. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:

    ```shell
README.md

@@ -172,7 +172,7 @@ # To install nightly with pip:
 ```

 ## DEPENDENCIES
-Python versions 3.9+ (CPython) and 3.10+ (PyPy) are supported. Other versions and implementations may or may not work correctly.
+Python versions 3.9+ (CPython) and 3.11+ (PyPy) are supported. Other versions and implementations may or may not work correctly.

 <!-- Python 3.5+ uses VC++14 and it is already embedded in the binary created
 <!x-- https://www.microsoft.com/en-us/download/details.aspx?id=26999 --x>
bundle/pyinstaller.py

@@ -62,16 +62,22 @@ def parse_options():


 def exe(onedir):
     """@returns (name, path)"""
+    platform_name, machine, extension = {
+        'win32': (None, MACHINE, '.exe'),
+        'darwin': ('macos', None, None),
+    }.get(OS_NAME, (OS_NAME, MACHINE, None))
+
     name = '_'.join(filter(None, (
         'yt-dlp',
-        {'win32': '', 'darwin': 'macos'}.get(OS_NAME, OS_NAME),
-        MACHINE,
+        platform_name,
+        machine,
     )))
+
     return name, ''.join(filter(None, (
         'dist/',
         onedir and f'{name}/',
         name,
-        OS_NAME == 'win32' and '.exe',
+        extension,
     )))

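The refactor moves the per-OS naming decisions into a single lookup table. A standalone sketch of the same logic, with `OS_NAME`/`MACHINE` turned into parameters so it runs anywhere:

# Sketch of the table-driven naming above; os_name/machine are function
# parameters here instead of the module-level OS_NAME/MACHINE constants.
def exe_name(os_name, machine, onedir=False):
    platform_name, mach, extension = {
        'win32': (None, machine, '.exe'),
        'darwin': ('macos', None, None),
    }.get(os_name, (os_name, machine, None))
    name = '_'.join(filter(None, ('yt-dlp', platform_name, mach)))
    # filter(None, ...) drops the False produced when onedir/extension are unset
    path = ''.join(filter(None, ('dist/', onedir and f'{name}/', name, extension)))
    return name, path

print(exe_name('darwin', 'x86_64'))       # ('yt-dlp_macos', 'dist/yt-dlp_macos')
print(exe_name('win32', 'x86_64', True))  # ('yt-dlp_x86_64', 'dist/yt-dlp_x86_64/yt-dlp_x86_64.exe')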
yt_dlp/cookies.py

@@ -1335,7 +1335,7 @@ def prepare_line(line):
             if len(cookie_list) != self._ENTRY_LEN:
                 raise http.cookiejar.LoadError(f'invalid length {len(cookie_list)}')
             cookie = self._CookieFileEntry(*cookie_list)
-            if cookie.expires_at and not cookie.expires_at.isdigit():
+            if cookie.expires_at and not re.fullmatch(r'[0-9]+(?:\.[0-9]+)?', cookie.expires_at):
                 raise http.cookiejar.LoadError(f'invalid expires at {cookie.expires_at}')
             return line
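The old `isdigit()` check rejected any expiry that is not a plain integer; the new pattern also accepts a fractional epoch value. A quick stdlib-only comparison of the two checks:

import re

# Old check vs. new check from the hunk above; a fractional epoch value
# such as '1735689600.123' only passes the regex.
for expires_at in ('1735689600', '1735689600.123', 'not-a-number'):
    old_ok = expires_at.isdigit()
    new_ok = bool(re.fullmatch(r'[0-9]+(?:\.[0-9]+)?', expires_at))
    print(f'{expires_at!r}: isdigit={old_ok} fullmatch={new_ok}')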
yt_dlp/downloader/hls.py

@@ -205,7 +205,7 @@ def is_ad_fragment_end(s):
             line = line.strip()
             if line:
                 if not line.startswith('#'):
-                    if format_index and discontinuity_count != format_index:
+                    if format_index is not None and discontinuity_count != format_index:
                         continue
                     if ad_frag_next:
                         continue
@@ -231,7 +231,7 @@ def is_ad_fragment_end(s):
                     byte_range = {}

                 elif line.startswith('#EXT-X-MAP'):
-                    if format_index and discontinuity_count != format_index:
+                    if format_index is not None and discontinuity_count != format_index:
                         continue
                     if frag_index > 0:
                         self.report_error(
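Both hunks replace a truthiness test with an explicit `None` check: a requested `format_index` of 0 is falsy, so the old condition skipped the discontinuity filter entirely. A minimal illustration:

# Minimal illustration of the falsy-zero bug the two hunks fix.
def should_skip(format_index, discontinuity_count):
    # old: `if format_index and ...` was silently False when format_index == 0
    return format_index is not None and discontinuity_count != format_index

print(should_skip(0, 3))     # True  (the old version returned False here)
print(should_skip(None, 3))  # False (no specific discontinuity requested)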
yt_dlp/extractor/_extractors.py

@@ -1781,6 +1781,7 @@
     RTVEALaCartaIE,
     RTVEAudioIE,
     RTVELiveIE,
+    RTVEProgramIE,
     RTVETelevisionIE,
 )
 from .rtvs import RTVSIE
@@ -2234,6 +2235,7 @@
 from .tvplayer import TVPlayerIE
 from .tvw import (
     TvwIE,
+    TvwNewsIE,
     TvwTvChannelsIE,
 )
 from .tweakers import TweakersIE
yt_dlp/extractor/archiveorg.py

@@ -33,7 +33,6 @@
     unified_timestamp,
     url_or_none,
     urlhandle_detect_ext,
-    variadic,
 )

@@ -232,6 +231,23 @@ class ArchiveOrgIE(InfoExtractor):
             'release_date': '19950402',
             'timestamp': 1084927901,
         },
+    }, {
+        # metadata['metadata']['description'] is a list of strings instead of str
+        'url': 'https://archive.org/details/pra-KZ1908.02',
+        'info_dict': {
+            'id': 'pra-KZ1908.02',
+            'ext': 'mp3',
+            'display_id': 'KZ1908.02_01.wav',
+            'title': 'Crips and Bloods speak about gang life',
+            'description': 'md5:2b56b35ff021311e3554b47a285e70b3',
+            'uploader': 'jake@archive.org',
+            'duration': 1733.74,
+            'track': 'KZ1908.02 01',
+            'track_number': 1,
+            'timestamp': 1336026026,
+            'upload_date': '20120503',
+            'release_year': 1992,
+        },
     }]

     @staticmethod
@@ -274,34 +290,40 @@ def _real_extract(self, url):
         m = metadata['metadata']
         identifier = m['identifier']

-        info = {
+        info = traverse_obj(m, {
+            'title': ('title', {str}),
+            'description': ('description', ({str}, (..., all, {' '.join})), {clean_html}, filter, any),
+            'uploader': (('uploader', 'adder'), {str}, any),
+            'creators': ('creator', (None, ...), {str}, filter, all, filter),
+            'license': ('licenseurl', {url_or_none}),
+            'release_date': ('date', {unified_strdate}),
+            'timestamp': (('publicdate', 'addeddate'), {unified_timestamp}, any),
+            'location': ('venue', {str}),
+            'release_year': ('year', {int_or_none}),
+        })
+        info.update({
             'id': identifier,
-            'title': m['title'],
-            'description': clean_html(m.get('description')),
-            'uploader': dict_get(m, ['uploader', 'adder']),
-            'creators': traverse_obj(m, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
-            'license': m.get('licenseurl'),
-            'release_date': unified_strdate(m.get('date')),
-            'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
             'webpage_url': f'https://archive.org/details/{identifier}',
-            'location': m.get('venue'),
-            'release_year': int_or_none(m.get('year'))}
+        })

         for f in metadata['files']:
             if f['name'] in entries:
                 entries[f['name']] = merge_dicts(entries[f['name']], {
                     'id': identifier + '/' + f['name'],
-                    'title': f.get('title') or f['name'],
-                    'display_id': f['name'],
-                    'description': clean_html(f.get('description')),
-                    'creators': traverse_obj(f, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
-                    'duration': parse_duration(f.get('length')),
-                    'track_number': int_or_none(f.get('track')),
-                    'album': f.get('album'),
-                    'discnumber': int_or_none(f.get('disc')),
-                    'release_year': int_or_none(f.get('year'))})
+                    **traverse_obj(f, {
+                        'title': (('title', 'name'), {str}, any),
+                        'display_id': ('name', {str}),
+                        'description': ('description', ({str}, (..., all, {' '.join})), {clean_html}, filter, any),
+                        'creators': ('creator', (None, ...), {str}, filter, all, filter),
+                        'duration': ('length', {parse_duration}),
+                        'track_number': ('track', {int_or_none}),
+                        'album': ('album', {str}),
+                        'discnumber': ('disc', {int_or_none}),
+                        'release_year': ('year', {int_or_none}),
+                    }),
+                })
                 entry = entries[f['name']]
-            elif traverse_obj(f, 'original', expected_type=str) in entries:
+            elif traverse_obj(f, ('original', {str})) in entries:
                 entry = entries[f['original']]
             else:
                 continue
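The refactor swaps imperative `m.get(...)` lookups for one declarative `traverse_obj` mapping. A small self-contained example of the pattern (assuming yt-dlp is installed; the metadata dict is made up), including the list-valued `description` case the new test covers:

from yt_dlp.utils import int_or_none
from yt_dlp.utils.traversal import traverse_obj

# Hypothetical metadata blob; 'description' is a list of strings, as in
# the new pra-KZ1908.02 test above.
m = {'title': 'A talk', 'description': ['part one', 'part two'], 'year': '1992'}

info = traverse_obj(m, {
    'title': ('title', {str}),
    # try the plain-str branch first; otherwise iterate the list and join it
    'description': ('description', ({str}, (..., all, {' '.join})), any),
    'release_year': ('year', {int_or_none}),
})
print(info)  # {'title': 'A talk', 'description': 'part one part two', 'release_year': 1992}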
yt_dlp/extractor/faulio.py

@@ -9,6 +9,7 @@
 class FaulioLiveIE(InfoExtractor):
     _DOMAINS = (
         'aloula.sba.sa',
+        'bahry.com',
         'maraya.sba.net.ae',
         'sat7plus.org',
     )
@@ -25,6 +26,18 @@ class FaulioLiveIE(InfoExtractor):
         'params': {
             'skip_download': 'Livestream',
         },
+    }, {
+        'url': 'https://bahry.com/live/1',
+        'info_dict': {
+            'id': 'bahry.faulio.com_1',
+            'title': str,
+            'description': str,
+            'ext': 'mp4',
+            'live_status': 'is_live',
+        },
+        'params': {
+            'skip_download': 'Livestream',
+        },
     }, {
         'url': 'https://maraya.sba.net.ae/live/1',
         'info_dict': {
yt_dlp/extractor/neteasemusic.py

@@ -34,7 +34,6 @@ class NetEaseMusicBaseIE(InfoExtractor):
         'sky',  # SVIP tier; 沉浸环绕声 (Surround Audio); flac
     )
     _API_BASE = 'http://music.163.com/api/'
-    _GEO_BYPASS = False

     def _create_eapi_cipher(self, api_path, query_body, cookies):
         request_text = json.dumps({**query_body, 'header': cookies}, separators=(',', ':'))
@@ -64,6 +63,8 @@ def _download_eapi_json(self, path, video_id, query_body, headers={}, **kwargs):
                 'MUSIC_U': ('MUSIC_U', {lambda i: i.value}),
             }),
         }
+        if self._x_forwarded_for_ip:
+            headers.setdefault('X-Real-IP', self._x_forwarded_for_ip)
         return self._download_json(
             urljoin('https://interface3.music.163.com/', f'/eapi{path}'), video_id,
             data=self._create_eapi_cipher(f'/api{path}', query_body, cookies), headers={
yt_dlp/extractor/rtve.py

@@ -6,9 +6,11 @@
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    InAdvancePagedList,
     clean_html,
     determine_ext,
     float_or_none,
+    int_or_none,
     make_archive_id,
     parse_iso8601,
     qualities,
@@ -371,3 +373,62 @@ def _real_extract(self, url):
             raise ExtractorError('The webpage doesn\'t contain any video', expected=True)

         return self.url_result(play_url, ie=RTVEALaCartaIE.ie_key())
+
+
+class RTVEProgramIE(RTVEBaseIE):
+    IE_NAME = 'rtve.es:program'
+    IE_DESC = 'RTVE.es programs'
+    _VALID_URL = r'https?://(?:www\.)?rtve\.es/play/videos/(?P<id>[\w-]+)/?(?:[?#]|$)'
+    _TESTS = [{
+        'url': 'https://www.rtve.es/play/videos/saber-vivir/',
+        'info_dict': {
+            'id': '111570',
+            'title': 'Saber vivir - Programa de ciencia y futuro en RTVE Play',
+        },
+        'playlist_mincount': 400,
+    }]
+    _PAGE_SIZE = 60
+
+    def _fetch_page(self, program_id, page_num):
+        return self._download_json(
+            f'https://www.rtve.es/api/programas/{program_id}/videos',
+            program_id, note=f'Downloading page {page_num}',
+            query={
+                'type': 39816,
+                'page': page_num,
+                'size': 60,
+            })
+
+    def _entries(self, page_data):
+        for video in traverse_obj(page_data, ('page', 'items', lambda _, v: url_or_none(v['htmlUrl']))):
+            yield self.url_result(
+                video['htmlUrl'], RTVEALaCartaIE, url_transparent=True,
+                **traverse_obj(video, {
+                    'id': ('id', {str}),
+                    'title': ('longTitle', {str}),
+                    'description': ('shortDescription', {str}),
+                    'duration': ('duration', {float_or_none(scale=1000)}),
+                    'series': (('programInfo', 'title'), {str}, any),
+                    'season_number': ('temporadaOrden', {int_or_none}),
+                    'season_id': ('temporadaId', {str}),
+                    'season': ('temporada', {str}),
+                    'episode_number': ('episode', {int_or_none}),
+                    'episode': ('title', {str}),
+                    'thumbnail': ('thumbnail', {url_or_none}),
+                }),
+            )
+
+    def _real_extract(self, url):
+        program_slug = self._match_id(url)
+        program_page = self._download_webpage(url, program_slug)
+
+        program_id = self._html_search_meta('DC.identifier', program_page, 'Program ID', fatal=True)
+
+        first_page = self._fetch_page(program_id, 1)
+        page_count = traverse_obj(first_page, ('page', 'totalPages', {int})) or 1
+
+        entries = InAdvancePagedList(
+            lambda idx: self._entries(self._fetch_page(program_id, idx + 1) if idx else first_page),
+            page_count, self._PAGE_SIZE)
+
+        return self.playlist_result(entries, program_id, self._html_extract_title(program_page))
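The new extractor pages through a program's videos with `InAdvancePagedList`, which takes the total page count up front and fetches each page lazily. A toy sketch of the pattern, assuming yt-dlp is installed:

from yt_dlp.utils import InAdvancePagedList

PAGE_SIZE = 3
DATA = list(range(8))  # stand-in for the API's video entries

def fetch_page(idx):  # 0-based page index, mirroring the lambda above
    yield from DATA[idx * PAGE_SIZE:(idx + 1) * PAGE_SIZE]

pages = -(-len(DATA) // PAGE_SIZE)  # ceil(8 / 3) == 3
print(list(InAdvancePagedList(fetch_page, pages, PAGE_SIZE)))  # [0, 1, 2, 3, 4, 5, 6, 7]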
yt_dlp/extractor/streaks.py

@@ -33,16 +33,20 @@ def _extract_from_streaks_api(self, project_id, media_id, headers=None, query=None):
                 **(headers or {}),
             })
         except ExtractorError as e:
-            if isinstance(e.cause, HTTPError) and e.cause.status in {403, 404}:
+            if isinstance(e.cause, HTTPError) and e.cause.status in (403, 404):
                 error = self._parse_json(e.cause.response.read().decode(), media_id, fatal=False)
                 message = traverse_obj(error, ('message', {str}))
                 code = traverse_obj(error, ('code', {str}))
+                error_id = traverse_obj(error, ('id', {int}))
-                if code == 'REQUEST_FAILED':
-                    self.raise_geo_restricted(message, countries=self._GEO_COUNTRIES)
-                elif code == 'MEDIA_NOT_FOUND':
-                    raise ExtractorError(message, expected=True)
-                elif code or message:
-                    raise ExtractorError(join_nonempty(code, message, delim=': '))
+                if error_id == 124:
+                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
+                elif error_id == 126:
+                    raise ExtractorError('Access is denied (possibly due to invalid/missing API key)')
+                if code == 'MEDIA_NOT_FOUND':
+                    raise ExtractorError(join_nonempty(code, message, delim=': '), expected=True)
+                if code or message:
+                    raise ExtractorError(join_nonempty(code, error_id, message, delim=': '))
             raise

         streaks_id = response['id']
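The rebuilt error branch folds the numeric `id` into the raised message with `join_nonempty`, which simply drops missing parts. For example (assuming yt-dlp is installed):

from yt_dlp.utils import join_nonempty

# join_nonempty skips falsy parts, so a missing code or error id simply
# disappears from the final message instead of leaving a stray delimiter.
print(join_nonempty('MEDIA_NOT_FOUND', None, 'Not found', delim=': '))
# -> MEDIA_NOT_FOUND: Not found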
yt_dlp/extractor/tver.py

@@ -1,12 +1,16 @@
+import datetime as dt
+
 from .streaks import StreaksBaseIE
 from ..utils import (
     ExtractorError,
+    GeoRestrictedError,
     int_or_none,
     join_nonempty,
     make_archive_id,
     smuggle_url,
     str_or_none,
     strip_or_none,
+    time_seconds,
     update_url_query,
 )
 from ..utils.traversal import require, traverse_obj
@@ -96,6 +100,7 @@ class TVerIE(StreaksBaseIE):
         'Referer': 'https://tver.jp/',
     }
     _PLATFORM_QUERY = {}
+    _STREAKS_API_INFO = {}

     def _real_initialize(self):
         session_info = self._download_json(
@@ -105,6 +110,9 @@ def _real_initialize(self):
                 'platform_uid': 'platform_uid',
                 'platform_token': 'platform_token',
             }))
+        self._STREAKS_API_INFO = self._download_json(
+            'https://player.tver.jp/player/streaks_info_v2.json', None,
+            'Downloading STREAKS API info', 'Unable to download STREAKS API info')

     def _call_platform_api(self, path, video_id, note=None, fatal=True, query=None):
         return self._download_json(
@@ -219,15 +227,26 @@ def _real_extract(self, url):
                 '_type': 'url_transparent',
                 'url': smuggle_url(
                     self.BRIGHTCOVE_URL_TEMPLATE % (account_id, brightcove_id),
-                    {'geo_countries': ['JP']}),
+                    {'geo_countries': self._GEO_COUNTRIES}),
                 'ie_key': 'BrightcoveNew',
             }

-        return {
-            **self._extract_from_streaks_api(video_info['streaks']['projectID'], streaks_id, {
-                'Origin': 'https://tver.jp',
-                'Referer': 'https://tver.jp/',
-            }),
+        project_id = video_info['streaks']['projectID']
+        key_idx = dt.datetime.fromtimestamp(time_seconds(hours=9), dt.timezone.utc).month % 6 or 6
+
+        try:
+            streaks_info = self._extract_from_streaks_api(project_id, streaks_id, {
+                'Origin': 'https://tver.jp',
+                'Referer': 'https://tver.jp/',
+                'X-Streaks-Api-Key': self._STREAKS_API_INFO[project_id]['api_key'][f'key0{key_idx}'],
+            })
+        except GeoRestrictedError as e:
+            # Catch and re-raise with metadata_available to support --ignore-no-formats-error
+            self.raise_geo_restricted(e.orig_msg, countries=self._GEO_COUNTRIES, metadata_available=True)
+            streaks_info = {}
+
+        return {
+            **streaks_info,
             **metadata,
             'id': video_id,
             '_old_archive_ids': [make_archive_id('BrightcoveNew', brightcove_id)] if brightcove_id else None,
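The `key_idx` expression picks one of six API-key slots based on the current month in JST (UTC+9), with `% 6 or 6` mapping months 6 and 12 to slot 6 instead of 0. A standalone sketch of that computation:

import datetime as dt

# Sketch of the key_idx logic above: the month in JST selects one of the
# slots key01..key06 in the downloaded STREAKS API info.
def key_slot(now_utc):
    jst_month = (now_utc + dt.timedelta(hours=9)).month
    return f'key0{jst_month % 6 or 6}'

print(key_slot(dt.datetime(2025, 6, 30, 20, tzinfo=dt.timezone.utc)))  # key01 (already July in JST)
print(key_slot(dt.datetime(2025, 12, 1, tzinfo=dt.timezone.utc)))      # key06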
yt_dlp/extractor/tvw.py

@@ -10,12 +10,15 @@
     unified_timestamp,
     url_or_none,
 )
-from ..utils.traversal import find_element, traverse_obj
+from ..utils.traversal import find_element, find_elements, traverse_obj


 class TvwIE(InfoExtractor):
     IE_NAME = 'tvw'
-    _VALID_URL = r'https?://(?:www\.)?tvw\.org/video/(?P<id>[^/?#]+)'
+    _VALID_URL = [
+        r'https?://(?:www\.)?tvw\.org/video/(?P<id>[^/?#]+)',
+        r'https?://(?:www\.)?tvw\.org/watch/?\?(?:[^#]+&)?eventID=(?P<id>\d+)',
+    ]
     _TESTS = [{
         'url': 'https://tvw.org/video/billy-frank-jr-statue-maquette-unveiling-ceremony-2024011211/',
         'md5': '9ceb94fe2bb7fd726f74f16356825703',
@@ -75,6 +78,20 @@ class TvwIE(InfoExtractor):
             'display_id': 'washington-to-washington-a-new-space-race-2022041111',
             'categories': ['Washington to Washington', 'General Interest'],
         },
+    }, {
+        'url': 'https://tvw.org/watch?eventID=2025041235',
+        'md5': '7d697c02f110b37d6a47622ea608ca90',
+        'info_dict': {
+            'id': '2025041235',
+            'ext': 'mp4',
+            'title': 'Legislative Review - Medicaid Postpartum Bill Sparks Debate & Senate Approves Automatic Voter Registration',
+            'thumbnail': r're:^https?://.*\.(?:jpe?g|png)$',
+            'description': 'md5:37d0f3a9187ae520aac261b3959eaee6',
+            'timestamp': 1745006400,
+            'upload_date': '20250418',
+            'location': 'Hayner Media Center',
+            'categories': ['Legislative Review'],
+        },
     }]

     def _real_extract(self, url):
@@ -125,6 +142,41 @@ def _real_extract(self, url):
         }


+class TvwNewsIE(InfoExtractor):
+    IE_NAME = 'tvw:news'
+    _VALID_URL = r'https?://(?:www\.)?tvw\.org/\d{4}/\d{2}/(?P<id>[^/?#]+)'
+    _TESTS = [{
+        'url': 'https://tvw.org/2024/01/the-impact-issues-to-watch-in-the-2024-legislative-session/',
+        'info_dict': {
+            'id': 'the-impact-issues-to-watch-in-the-2024-legislative-session',
+            'title': 'The Impact - Issues to Watch in the 2024 Legislative Session',
+            'description': 'md5:65f0b33ec8f18ff1cd401c5547aa5441',
+        },
+        'playlist_count': 6,
+    }, {
+        'url': 'https://tvw.org/2024/06/the-impact-water-rights-and-the-skookumchuck-dam-debate/',
+        'info_dict': {
+            'id': 'the-impact-water-rights-and-the-skookumchuck-dam-debate',
+            'title': 'The Impact - Water Rights and the Skookumchuck Dam Debate',
+            'description': 'md5:185f3a2350ef81e3fa159ac3e040a94b',
+        },
+        'playlist_count': 1,
+    }]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        webpage = self._download_webpage(url, playlist_id)
+
+        video_ids = traverse_obj(webpage, (
+            {find_elements(cls='invintus-player', html=True)}, ..., {extract_attributes}, 'data-eventid'))
+
+        return self.playlist_from_matches(
+            video_ids, playlist_id,
+            playlist_title=remove_end(self._og_search_title(webpage, default=None), ' - TVW'),
+            playlist_description=self._og_search_description(webpage, default=None),
+            getter=lambda x: f'https://tvw.org/watch?eventID={x}', ie=TvwIE)
+
+
 class TvwTvChannelsIE(InfoExtractor):
     IE_NAME = 'tvw:tvchannels'
     _VALID_URL = r'https?://(?:www\.)?tvw\.org/tvchannels/(?P<id>[^/?#]+)'
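TvwNewsIE scrapes the embedded player tags for their event IDs. The same `find_elements`/`extract_attributes` pipeline works on any HTML string, assuming a recent yt-dlp; the snippet below is made up:

from yt_dlp.utils import extract_attributes
from yt_dlp.utils.traversal import find_elements, traverse_obj

# Hypothetical fragment standing in for a downloaded news page.
webpage = '''<div class="invintus-player" data-eventid="2024011211"></div>
<div class="invintus-player" data-eventid="2024011212"></div>'''

# find_elements(html=True) yields the matching tags' HTML; extract_attributes
# parses each tag into a dict, from which data-eventid is collected.
video_ids = traverse_obj(webpage, (
    {find_elements(cls='invintus-player', html=True)}, ..., {extract_attributes}, 'data-eventid'))
print(video_ids)  # ['2024011211', '2024011212']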
yt_dlp/update.py

@@ -139,7 +139,18 @@ def _get_binary_name():


 def _get_system_deprecation():
-    MIN_SUPPORTED, MIN_RECOMMENDED = (3, 9), (3, 9)
+    MIN_SUPPORTED, MIN_RECOMMENDED = (3, 9), (3, 10)
+
+    EXE_MSG_TMPL = ('Support for {} has been deprecated. '
+                    'See https://github.com/yt-dlp/yt-dlp/{} for details.\n{}')
+    STOP_MSG = 'You may stop receiving updates on this version at any time!'
+    variant = detect_variant()
+
+    # Temporary until macos_legacy executable builds are discontinued
+    if variant == 'darwin_legacy_exe':
+        return EXE_MSG_TMPL.format(
+            f'{variant} (the PyInstaller-bundled executable for macOS versions older than 10.15)',
+            'issues/13856', STOP_MSG)

     if sys.version_info > MIN_RECOMMENDED:
         return None
@@ -150,6 +161,13 @@ def _get_system_deprecation():
     if sys.version_info < MIN_SUPPORTED:
         return f'Python version {major}.{minor} is no longer supported! {PYTHON_MSG}'

+    # Temporary until aarch64/armv7l build flow is bumped to Ubuntu 22.04 and Python 3.10
+    if variant in ('linux_aarch64_exe', 'linux_armv7l_exe'):
+        libc_ver = version_tuple(os.confstr('CS_GNU_LIBC_VERSION').partition(' ')[2])
+        if libc_ver < (2, 35):
+            return EXE_MSG_TMPL.format('system glibc version < 2.35', 'issues/13858', STOP_MSG)
+        return None
+
     return f'Support for Python version {major}.{minor} has been deprecated. {PYTHON_MSG}'
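The new glibc gate parses the version string reported by the C library and compares tuples. A standalone sketch, assuming a glibc-based Linux (`os.confstr` may return nothing on other platforms):

import os

# os.confstr('CS_GNU_LIBC_VERSION') returns e.g. 'glibc 2.35' on glibc
# systems; the tuple comparison mirrors version_tuple() in the diff above.
def glibc_version():
    return tuple(int(x) for x in os.confstr('CS_GNU_LIBC_VERSION').partition(' ')[2].split('.'))

if glibc_version() < (2, 35):
    print('Support for system glibc version < 2.35 has been deprecated.')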