mirror of https://github.com/yt-dlp/yt-dlp.git synced 2026-01-17 12:21:52 +00:00

Compare commits


33 Commits

Author SHA1 Message Date
github-actions[bot]
a75399d89f Release 2025.10.22
Created by: bashonly

:ci skip all
2025-10-22 19:42:16 +00:00
Robin
c9356f308d [ie/idagio] Support URLs with country codes (#14655)
Authored by: robin-mu
2025-10-22 19:33:43 +00:00
bashonly
de7b3c0705 [cleanup] Misc (#14701)
Authored by: bashonly
2025-10-22 19:25:35 +00:00
gamer191
2c9091e355 [ie/youtube] Use temporary player client workaround (#14693)
Closes #14680
Authored by: gamer191
2025-10-22 19:08:06 +00:00
sepro
dfc0a84c19 [docs] Update list of maintainers (#14148)
Authored by: seproDev, bashonly, coletdjnz

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
Co-authored-by: coletdjnz <coletdjnz@protonmail.com>
2025-10-18 23:07:21 +02:00
doe1080
fe5ae54a7b [ie/tvnoe] Rework Extractor (#13369)
Authored by: doe1080
2025-10-15 22:00:20 +02:00
doe1080
78748b506f [ie/appleconnect] Rework extractor (#13229)
Authored by: doe1080
2025-10-15 20:42:15 +02:00
sepro
c7bda2192a [cleanup] Misc (#14594)
Authored by: seproDev, bashonly

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
2025-10-15 11:16:50 +00:00
bashonly
4e6a693057 Remove Python 3.9 support (#13861)
Closes #13858
Authored by: bashonly
2025-10-15 10:25:21 +00:00
github-actions[bot]
264044286d Release 2025.10.14
Created by: bashonly

:ci skip all
2025-10-14 23:29:27 +00:00
Robin
a98e7f9f58 [ie/idagio] Add extractors (#14586)
Closes #2624
Authored by: robin-mu
2025-10-15 01:23:13 +02:00
uoag
0ea5d5882d [ie/abc.net.au] Support listen URLs (#14389)
Authored by: uoag
2025-10-14 22:02:21 +02:00
CasualYouTuber31
cdc533b114 [ie/tiktok:user] Fix private account extraction (#14585)
Closes #14565
Authored by: CasualYT31
2025-10-14 19:42:36 +00:00
bashonly
c2e124881f [ie/slideslive] Fix extractor (#14619)
Closes #14518
Authored by: bashonly
2025-10-14 19:38:15 +00:00
bashonly
ad55bfcfb7 [ie/10play] Handle geo-restriction errors (#14618)
Authored by: bashonly
2025-10-14 19:36:17 +00:00
Josh Holmer
739125d40f [ie/xhamster] Fix extractor (#14446)
Closes #14395
Authored by: shssoichiro, dhwz, dirkf

Co-authored-by: dhwz <3697946+dhwz@users.noreply.github.com>
Co-authored-by: dirkf <1222880+dirkf@users.noreply.github.com>
2025-10-14 19:31:07 +00:00
Sean Ellingham
5f94f05490 [ie/vidyard] Extract chapters (#14478)
Closes #14477
Authored by: exterrestris
2025-10-14 13:53:54 +02:00
columndeeply
5d7678195a [ie/PrankCastPost] Rework extractor (#14445)
Authored by: columndeeply
2025-10-14 13:25:07 +02:00
sepro
eafedc2181 [ie/10play] Rework extractor (#14417)
Closes #14276
Authored by: seproDev, Sipherdrakon

Co-authored-by: Sipherdrakon <64430430+Sipherdrakon@users.noreply.github.com>
2025-10-13 00:54:26 +02:00
Ceci
8eb8695139 [ie/dropout] Update extractor for new domain (#14531)
Closes #14521
Authored by: cecilia-sanare
2025-10-12 23:53:53 +02:00
uoag
df160ab18d [ie/cbc.ca:listen] Add extractor (#14391)
Authored by: uoag
2025-10-12 23:42:39 +02:00
sepro
6d41aaf21c [ie/soundcloud] Support new API URLs (#14449)
Closes #14443
Authored by: seproDev
2025-10-12 22:21:34 +02:00
sepro
a6673a8e82 Fix prefer-vp9-sort compat option (#14603)
Closes #14602
Authored by: seproDev
2025-10-12 20:30:17 +02:00
sepro
87be1bb96a [ie/musescore] Fix extractor (#14598)
Closes #14485
Authored by: seproDev
2025-10-12 08:49:15 +02:00
coletdjnz
ccc25d6710 [ie/youtube:tab] Fix approximate timestamp extraction for feeds (#14539)
Authored by: coletdjnz
2025-10-12 08:29:06 +13:00
Vu Thanh Tai
5513036104 [ie/tiktok] Support browser impersonation (#14473)
Closes #10919, Closes #12574
Authored by: thanhtaivtt, bashonly

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
2025-10-01 06:53:19 +00:00
coletdjnz
bd5ed90419 [ie/youtube] Detect experiment binding GVS PO Token to video id (#14471)
Fixes https://github.com/yt-dlp/yt-dlp/issues/14421

Authored by: coletdjnz
2025-09-29 16:25:09 +13:00
github-actions[bot]
88e2a2de8e Release 2025.09.26
Created by: bashonly

:ci skip all
2025-09-26 22:13:00 +00:00
bashonly
12b57d2858 [ie/youtube] Replace tv_simply with web_safari in default clients (#14465)
Closes #14456
Authored by: bashonly
2025-09-26 21:59:13 +00:00
sepro
b7b7910d96 [ie/youtube] Fix player JS overrides (#14430)
Authored by: seproDev, bashonly

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
2025-09-26 21:19:57 +00:00
bashonly
50e452fd7d [ie/twitch:vod] Fix live_status detection (#14457)
Closes #14455
Authored by: bashonly
2025-09-26 18:27:17 +00:00
sepro
94c5622be9 [ie/youtube] Player client maintenance (#14448)
Authored by: seproDev
2025-09-26 18:13:20 +00:00
sepro
7df5acc546 [ie/youtube] Improve PO token logging (#14447)
Authored by: seproDev
2025-09-26 18:12:16 +00:00
77 changed files with 1326 additions and 556 deletions

.github/FUNDING.yml
View File

@@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
 issuehunt: # Replace with a single IssueHunt username
 otechie: # Replace with a single Otechie username
-custom: ['https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators']
+custom: ['https://github.com/yt-dlp/yt-dlp/blob/master/Maintainers.md#maintainers']

View File

@@ -194,7 +194,7 @@ jobs:
 UPDATE_TO: yt-dlp/yt-dlp@2025.09.05
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 with:
 fetch-depth: 0 # Needed for changelog
@@ -255,7 +255,7 @@ jobs:
 SKIP_ONEFILE_BUILD: ${{ (!matrix.onefile && '1') || '' }}
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - name: Cache requirements
 if: matrix.cache_requirements
@@ -318,7 +318,7 @@ jobs:
 UPDATE_TO: yt-dlp/yt-dlp@2025.09.05
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 # NB: Building universal2 does not work with python from actions/setup-python
 - name: Cache requirements
@@ -448,7 +448,7 @@ jobs:
 PYI_WHEEL: pyinstaller-${{ matrix.pyi_version }}-py3-none-${{ matrix.platform_tag }}.whl
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - uses: actions/setup-python@v6
 with:
 python-version: ${{ matrix.python_version }}
@@ -536,7 +536,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Download artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v5
 with:
 path: artifact
 pattern: build-bin-*
@@ -558,35 +558,39 @@ jobs:
 cat >> _update_spec << EOF
 # This file is used for regulating self-update
 lock 2022.08.18.36 .+ Python 3\.6
-lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+lock 2023.11.16 zip Python 3\.7
 lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lock 2024.10.22 py2exe .+
 lock 2024.10.22 zip Python 3\.8
 lock 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
 lock 2025.08.11 darwin_legacy_exe .+
 lock 2025.08.27 linux_armv7l_exe .+
+lock 2025.10.14 zip Python 3\.9
 lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
-lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp 2023.11.16 zip Python 3\.7
 lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lockV2 yt-dlp/yt-dlp 2024.10.22 py2exe .+
 lockV2 yt-dlp/yt-dlp 2024.10.22 zip Python 3\.8
 lockV2 yt-dlp/yt-dlp 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
 lockV2 yt-dlp/yt-dlp 2025.08.11 darwin_legacy_exe .+
 lockV2 yt-dlp/yt-dlp 2025.08.27 linux_armv7l_exe .+
+lockV2 yt-dlp/yt-dlp 2025.10.14 zip Python 3\.9
-lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 zip Python 3\.7
 lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 py2exe .+
 lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 zip Python 3\.8
 lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
 lockV2 yt-dlp/yt-dlp-nightly-builds 2025.08.12.233030 darwin_legacy_exe .+
 lockV2 yt-dlp/yt-dlp-nightly-builds 2025.08.30.232839 linux_armv7l_exe .+
+lockV2 yt-dlp/yt-dlp-nightly-builds 2025.10.14.232845 zip Python 3\.9
-lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 zip Python 3\.7
 lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.045052 py2exe .+
 lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 zip Python 3\.8
 lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
 lockV2 yt-dlp/yt-dlp-master-builds 2025.08.12.232447 darwin_legacy_exe .+
 lockV2 yt-dlp/yt-dlp-master-builds 2025.09.05.212910 linux_armv7l_exe .+
+lockV2 yt-dlp/yt-dlp-master-builds 2025.10.14.232330 zip Python 3\.9
 EOF
 - name: Sign checksum files
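The `_update_spec` lines above are the self-updater's version locks: each `lock`/`lockV2` entry pairs a last-supported version with a regex matched against the running build's variant and system description, so that (for example) `zip` installs on Python 3.9 are never offered anything newer than 2025.10.14. A minimal sketch of how such a line could be interpreted — hypothetical code, not the actual updater in `yt_dlp/update.py`:

```python
import re

# Hypothetical parser for a single _update_spec lock line; the real
# updater also distinguishes lock vs. lockV2 and per-repo entries.
spec_line = r'lock 2025.10.14 zip Python 3\.9'
_, locked_version, pattern = spec_line.split(maxsplit=2)

# Example "variant + system" string for a zip install running Python 3.9
build_description = 'zip Python 3.9.18 (CPython 64bit) - Linux'
if re.match(pattern, build_description):
    print(f'self-update capped at {locked_version}')
```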

View File

@@ -29,7 +29,7 @@ jobs:
 steps:
 - name: Checkout repository
-uses: actions/checkout@v4
+uses: actions/checkout@v5
 # Initializes the CodeQL tools for scanning.
 - name: Initialize CodeQL

View File

@@ -36,12 +36,10 @@ jobs:
 fail-fast: false
 matrix:
 os: [ubuntu-latest]
-# CPython 3.9 is in quick-test
-python-version: ['3.10', '3.11', '3.12', '3.13', '3.14-dev', pypy-3.11]
+# CPython 3.10 is in quick-test
+python-version: ['3.11', '3.12', '3.13', '3.14', pypy-3.11]
 include:
 # atleast one of each CPython/PyPy tests must be in windows
-- os: windows-latest
-python-version: '3.9'
 - os: windows-latest
 python-version: '3.10'
 - os: windows-latest
@@ -51,11 +49,11 @@ jobs:
 - os: windows-latest
 python-version: '3.13'
 - os: windows-latest
-python-version: '3.14-dev'
+python-version: '3.14'
 - os: windows-latest
 python-version: pypy-3.11
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - name: Set up Python ${{ matrix.python-version }}
 uses: actions/setup-python@v6
 with:

View File

@@ -9,11 +9,11 @@ jobs:
 if: "contains(github.event.head_commit.message, 'ci run dl')"
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - name: Set up Python
 uses: actions/setup-python@v6
 with:
-python-version: 3.9
+python-version: '3.10'
 - name: Install test requirements
 run: python3 ./devscripts/install_deps.py --include dev
 - name: Run tests
@@ -28,15 +28,15 @@ jobs:
 fail-fast: true
 matrix:
 os: [ubuntu-latest]
-python-version: ['3.10', '3.11', '3.12', '3.13', '3.14-dev', pypy-3.11]
+python-version: ['3.11', '3.12', '3.13', '3.14', pypy-3.11]
 include:
 # atleast one of each CPython/PyPy tests must be in windows
 - os: windows-latest
-python-version: '3.9'
+python-version: '3.10'
 - os: windows-latest
 python-version: pypy-3.11
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - name: Set up Python ${{ matrix.python-version }}
 uses: actions/setup-python@v6
 with:

View File

@@ -9,11 +9,11 @@ jobs:
 if: "!contains(github.event.head_commit.message, 'ci skip all')"
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v4
-- name: Set up Python 3.9
+- uses: actions/checkout@v5
+- name: Set up Python 3.10
 uses: actions/setup-python@v6
 with:
-python-version: '3.9'
+python-version: '3.10'
 - name: Install test requirements
 run: python3 ./devscripts/install_deps.py -o --include test
 - name: Run tests
@@ -26,10 +26,10 @@ jobs:
 if: "!contains(github.event.head_commit.message, 'ci skip all')"
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - uses: actions/setup-python@v6
 with:
-python-version: '3.9'
+python-version: '3.10'
 - name: Install dev dependencies
 run: python3 ./devscripts/install_deps.py -o --include static-analysis
 - name: Make lazy extractors

View File

@@ -38,7 +38,7 @@ jobs:
 id-token: write # mandatory for trusted publishing
 steps:
 - name: Download artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v5
 with:
 path: dist
 name: build-pypi

View File

@@ -12,7 +12,7 @@ jobs:
 outputs:
 commit: ${{ steps.check_for_new_commits.outputs.commit }}
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 with:
 fetch-depth: 0
 - name: Check for new commits
@@ -53,7 +53,7 @@ jobs:
 id-token: write # mandatory for trusted publishing
 steps:
 - name: Download artifacts
-uses: actions/download-artifact@v4
+uses: actions/download-artifact@v5
 with:
 path: dist
 name: build-pypi

View File

@@ -75,7 +75,7 @@ jobs:
 head_sha: ${{ steps.get_target.outputs.head_sha }}
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 with:
 fetch-depth: 0
@@ -170,7 +170,7 @@ jobs:
 id-token: write # mandatory for trusted publishing
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 with:
 fetch-depth: 0
 - uses: actions/setup-python@v6
@@ -233,10 +233,10 @@ jobs:
 VERSION: ${{ needs.prepare.outputs.version }}
 HEAD_SHA: ${{ needs.prepare.outputs.head_sha }}
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 with:
 fetch-depth: 0
-- uses: actions/download-artifact@v4
+- uses: actions/download-artifact@v5
 with:
 path: artifact
 pattern: build-*
@@ -259,7 +259,7 @@ jobs:
 "[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]" \
 "(https://discord.gg/H5MNcFW63r \"Discord\") " \
 "[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]" \
-"(https://github.com/${BASE_REPO}/blob/master/Collaborators.md#collaborators \"Donate\") " \
+"(https://github.com/${BASE_REPO}/blob/master/Maintainers.md#maintainers \"Donate\") " \
 "[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]" \
 "(https://github.com/${REPOSITORY}${DOCS_PATH}#readme \"Documentation\") " > ./RELEASE_NOTES
 if [[ "${TARGET_REPO}" == "${BASE_REPO}" ]]; then

View File

@@ -25,9 +25,9 @@ jobs:
 fail-fast: false
 matrix:
 os: [ubuntu-latest, windows-latest]
-python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14-dev', pypy-3.11]
+python-version: ['3.10', '3.11', '3.12', '3.13', '3.14', pypy-3.11]
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - name: Set up Python ${{ matrix.python-version }}
 uses: actions/setup-python@v6
 with:

View File

@@ -26,7 +26,7 @@ jobs:
 name: Check workflows
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v4
+- uses: actions/checkout@v5
 - uses: actions/setup-python@v6
 with:
 python-version: "3.10" # Keep this in sync with release.yml's prepare job

View File

@@ -284,7 +284,7 @@ After you have ensured this site is distributing its content legally, you can fo
 You can use `hatch fmt` to automatically fix problems. Rules that the linter/formatter enforces should not be disabled with `# noqa` unless a maintainer requests it. The only exception allowed is for old/printf-style string formatting in GraphQL query templates (use `# noqa: UP031`).
-1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython >=3.9 and PyPy >=3.11. Backward compatibility is not required for even older versions of Python.
+1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython >=3.10 and PyPy >=3.11. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
 ```shell

View File

@@ -1,10 +1,10 @@
 pukkandan (owner)
-shirt-dev (collaborator)
-coletdjnz/colethedj (collaborator)
-Ashish0804 (collaborator)
-bashonly (collaborator)
-Grub4K (collaborator)
-seproDev (collaborator)
+shirt-dev (maintainer)
+coletdjnz (maintainer)
+Ashish0804 (maintainer)
+bashonly (maintainer)
+Grub4K (maintainer)
+seproDev (maintainer)
 h-h-h-h
 pauldubois98
 nixxo
@@ -811,3 +811,10 @@ zakaryan2004
 cdce8p
 nicolaasjan
 willsmillie
+CasualYT31
+cecilia-sanare
+dhwz
+robin-mu
+shssoichiro
+thanhtaivtt
+uoag

View File

@@ -4,6 +4,66 @@
 # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
 -->
+### 2025.10.22
+#### Important changes
+- **A stopgap release with a *TEMPORARY partial* fix for YouTube support**
+Some formats may still be unavailable, especially if cookies are passed to yt-dlp. The ***NEXT*** release, expected very soon, **will require an external JS runtime (e.g. Deno)** in order for YouTube downloads to work properly. [Read more](https://github.com/yt-dlp/yt-dlp/issues/14404)
+- **The minimum *required* Python version has been raised to 3.10**
+Python 3.9 has reached its end-of-life as of October 2025, and yt-dlp has now removed support for it. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13858)
+#### Core changes
+- [Remove Python 3.9 support](https://github.com/yt-dlp/yt-dlp/commit/4e6a693057cfaf1ce1f07b019ed3bfce2bf936f6) ([#13861](https://github.com/yt-dlp/yt-dlp/issues/13861)) by [bashonly](https://github.com/bashonly)
+#### Extractor changes
+- **appleconnect**: [Rework extractor](https://github.com/yt-dlp/yt-dlp/commit/78748b506f0dca8236ac0045ed7f72f7cf334b62) ([#13229](https://github.com/yt-dlp/yt-dlp/issues/13229)) by [doe1080](https://github.com/doe1080)
+- **idagio**: [Support URLs with country codes](https://github.com/yt-dlp/yt-dlp/commit/c9356f308dd3c5f9f494cb40ed14c5df017b4fe0) ([#14655](https://github.com/yt-dlp/yt-dlp/issues/14655)) by [robin-mu](https://github.com/robin-mu)
+- **tvnoe**: [Rework Extractor](https://github.com/yt-dlp/yt-dlp/commit/fe5ae54a7b08ebe679f03afdeafbe1cee5784d5b) ([#13369](https://github.com/yt-dlp/yt-dlp/issues/13369)) by [doe1080](https://github.com/doe1080)
+- **youtube**: [Use temporary player client workaround](https://github.com/yt-dlp/yt-dlp/commit/2c9091e355a7ba5d1edb69796ecdca48199b77fb) ([#14693](https://github.com/yt-dlp/yt-dlp/issues/14693)) by [gamer191](https://github.com/gamer191)
+#### Misc. changes
+- **cleanup**
+    - Miscellaneous
+        - [c7bda21](https://github.com/yt-dlp/yt-dlp/commit/c7bda2192aa24afce40fdbbbe056d269aa3b2872) by [bashonly](https://github.com/bashonly), [seproDev](https://github.com/seproDev)
+        - [de7b3c0](https://github.com/yt-dlp/yt-dlp/commit/de7b3c0705022cb777c5b4b7f0c69c59ad6ff538) by [bashonly](https://github.com/bashonly)
+- **docs**: [Update list of maintainers](https://github.com/yt-dlp/yt-dlp/commit/dfc0a84c192a7357dd1768cc345d590253a14fe5) ([#14148](https://github.com/yt-dlp/yt-dlp/issues/14148)) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [seproDev](https://github.com/seproDev)
+### 2025.10.14
+#### Core changes
+- [Fix `prefer-vp9-sort` compat option](https://github.com/yt-dlp/yt-dlp/commit/a6673a8e82276ea529c1773ed09e5bc4a22e822a) ([#14603](https://github.com/yt-dlp/yt-dlp/issues/14603)) by [seproDev](https://github.com/seproDev)
+#### Extractor changes
+- **10play**
+    - [Handle geo-restriction errors](https://github.com/yt-dlp/yt-dlp/commit/ad55bfcfb700fbfc1364c04e3425761d6f95c0a7) ([#14618](https://github.com/yt-dlp/yt-dlp/issues/14618)) by [bashonly](https://github.com/bashonly)
+    - [Rework extractor](https://github.com/yt-dlp/yt-dlp/commit/eafedc21817bb0de20e9aaccd7151a1d4c4e1ebd) ([#14417](https://github.com/yt-dlp/yt-dlp/issues/14417)) by [seproDev](https://github.com/seproDev), [Sipherdrakon](https://github.com/Sipherdrakon)
+- **abc.net.au**: [Support listen URLs](https://github.com/yt-dlp/yt-dlp/commit/0ea5d5882def84415f946907cfc00ab431c18fed) ([#14389](https://github.com/yt-dlp/yt-dlp/issues/14389)) by [uoag](https://github.com/uoag)
+- **cbc.ca**: listen: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/df160ab18db523f6629f2e7e20123d7a3551df28) ([#14391](https://github.com/yt-dlp/yt-dlp/issues/14391)) by [uoag](https://github.com/uoag)
+- **dropout**: [Update extractor for new domain](https://github.com/yt-dlp/yt-dlp/commit/8eb8695139dece6351aac10463df63b87b45b000) ([#14531](https://github.com/yt-dlp/yt-dlp/issues/14531)) by [cecilia-sanare](https://github.com/cecilia-sanare)
+- **idagio**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/a98e7f9f58a9492d2cb216baa59c890ed8ce02f3) ([#14586](https://github.com/yt-dlp/yt-dlp/issues/14586)) by [robin-mu](https://github.com/robin-mu)
+- **musescore**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/87be1bb96ac47abaaa4cfc6d7dd651e511b74551) ([#14598](https://github.com/yt-dlp/yt-dlp/issues/14598)) by [seproDev](https://github.com/seproDev)
+- **prankcastpost**: [Rework extractor](https://github.com/yt-dlp/yt-dlp/commit/5d7678195a7d0c045a9fe0418383171a71a7ea43) ([#14445](https://github.com/yt-dlp/yt-dlp/issues/14445)) by [columndeeply](https://github.com/columndeeply)
+- **slideslive**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/c2e124881f9aa02097589e853b3d3505e78372c4) ([#14619](https://github.com/yt-dlp/yt-dlp/issues/14619)) by [bashonly](https://github.com/bashonly)
+- **soundcloud**: [Support new API URLs](https://github.com/yt-dlp/yt-dlp/commit/6d41aaf21c61a87e74564646abd0a8ee887e888d) ([#14449](https://github.com/yt-dlp/yt-dlp/issues/14449)) by [seproDev](https://github.com/seproDev)
+- **tiktok**
+    - [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/5513036104ed9710f624c537fb3644b07a0680db) ([#14473](https://github.com/yt-dlp/yt-dlp/issues/14473)) by [bashonly](https://github.com/bashonly), [thanhtaivtt](https://github.com/thanhtaivtt)
    - user: [Fix private account extraction](https://github.com/yt-dlp/yt-dlp/commit/cdc533b114c35ceb8a2e9dd3eb9c172a8737ae5e) ([#14585](https://github.com/yt-dlp/yt-dlp/issues/14585)) by [CasualYT31](https://github.com/CasualYT31)
+- **vidyard**: [Extract chapters](https://github.com/yt-dlp/yt-dlp/commit/5f94f054907c12e68129cd9ac2508ed8aba1b223) ([#14478](https://github.com/yt-dlp/yt-dlp/issues/14478)) by [exterrestris](https://github.com/exterrestris)
+- **xhamster**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/739125d40f8ede3beb7be68fc4df55bec0d226fd) ([#14446](https://github.com/yt-dlp/yt-dlp/issues/14446)) by [dhwz](https://github.com/dhwz), [dirkf](https://github.com/dirkf), [shssoichiro](https://github.com/shssoichiro)
+- **youtube**
+    - [Detect experiment binding GVS PO Token to video id](https://github.com/yt-dlp/yt-dlp/commit/bd5ed90419eea18adfb2f0d8efa9d22b2029119f) ([#14471](https://github.com/yt-dlp/yt-dlp/issues/14471)) by [coletdjnz](https://github.com/coletdjnz)
+    - tab: [Fix approximate timestamp extraction for feeds](https://github.com/yt-dlp/yt-dlp/commit/ccc25d6710a4aa373b7e15c558e07f8a2ffae5f3) ([#14539](https://github.com/yt-dlp/yt-dlp/issues/14539)) by [coletdjnz](https://github.com/coletdjnz)
+### 2025.09.26
+#### Extractor changes
+- **twitch**: vod: [Fix `live_status` detection](https://github.com/yt-dlp/yt-dlp/commit/50e452fd7dfb8a648bd3b9aaabc8f94f37ce2051) ([#14457](https://github.com/yt-dlp/yt-dlp/issues/14457)) by [bashonly](https://github.com/bashonly)
+- **youtube**
+    - [Fix player JS overrides](https://github.com/yt-dlp/yt-dlp/commit/b7b7910d96359a539b7997890342ab4a59dd685d) ([#14430](https://github.com/yt-dlp/yt-dlp/issues/14430)) by [bashonly](https://github.com/bashonly), [seproDev](https://github.com/seproDev)
+    - [Improve PO token logging](https://github.com/yt-dlp/yt-dlp/commit/7df5acc546dccd32213c3a125d721e32b06d71b0) ([#14447](https://github.com/yt-dlp/yt-dlp/issues/14447)) by [seproDev](https://github.com/seproDev)
+    - [Player client maintenance](https://github.com/yt-dlp/yt-dlp/commit/94c5622be96474ca3c637e52898c4daee4d8fb69) ([#14448](https://github.com/yt-dlp/yt-dlp/issues/14448)) by [seproDev](https://github.com/seproDev)
+    - [Replace `tv_simply` with `web_safari` in default clients](https://github.com/yt-dlp/yt-dlp/commit/12b57d2858845c0c7fb33bf9aa8ed7be6905535d) ([#14465](https://github.com/yt-dlp/yt-dlp/issues/14465)) by [bashonly](https://github.com/bashonly)
 ### 2025.09.23
 #### Important changes

View File

@@ -1,59 +1,34 @@
-# Collaborators
-This is a list of the collaborators of the project and their major contributions. See the [Changelog](Changelog.md) for more details.
+# Maintainers
+This file lists the maintainers of yt-dlp and their major contributions. See the [Changelog](Changelog.md) for more details.
 You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [authors of youtube-dl](https://github.com/ytdl-org/youtube-dl/blob/master/AUTHORS)
-## [pukkandan](https://github.com/pukkandan)
-[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/pukkandan)
-[![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/pukkandan)
-* Owner of the fork
-## [shirt](https://github.com/shirt-dev)
-[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/shirt)
-* Multithreading (`-N`) and aria2c support for fragment downloads
-* Support for media initialization and discontinuity in HLS
-* The self-updater (`-U`)
-## [coletdjnz](https://github.com/coletdjnz)
+## Core Maintainers
+Core Maintainers are responsible for reviewing and merging contributions, publishing releases, and steering the overall direction of the project.
+**You can contact the core maintainers via `maintainers@yt-dlp.org`.**
+### [coletdjnz](https://github.com/coletdjnz)
 [![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/coletdjnz)
-* Improved plugin architecture
-* Rewrote the networking infrastructure, implemented support for `requests`
-* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
-* Added support for new websites YoutubeWebArchive, MainStreaming, PRX, nzherald, Mediaklikk, StarTV etc
-* Improved/fixed support for Patreon, panopto, gfycat, itv, pbs, SouthParkDE etc
-## [Ashish0804](https://github.com/Ashish0804) <sub><sup>[Inactive]</sup></sub>
-[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/ashish0804)
-* Added support for new websites BiliIntl, DiscoveryPlusIndia, OlympicsReplay, PlanetMarathi, ShemarooMe, Utreon, Zee5 etc
-* Added playlist/series downloads for Hotstar, ParamountPlus, Rumble, SonyLIV, Trovo, TubiTv, Voot etc
-* Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc
-## [bashonly](https://github.com/bashonly)
-* `--update-to`, self-updater rewrite, automated/nightly/master releases
-* `--cookies-from-browser` support for Firefox containers, external downloader cookie handling overhaul
-* Added support for new websites like Dacast, Kick, NBCStations, Triller, VideoKen, Weverse, WrestleUniverse etc
-* Improved/fixed support for Anvato, Brightcove, Reddit, SlidesLive, TikTok, Twitter, Vimeo etc
-## [Grub4K](https://github.com/Grub4K)
+* Overhauled the networking stack and implemented support for `requests` and `curl_cffi` (`--impersonate`) HTTP clients
+* Reworked the plugin architecture to support installing plugins across all yt-dlp distributions (exe, pip, etc.)
+* Maintains support for YouTube
+* Added and fixed support for various other sites
+### [bashonly](https://github.com/bashonly)
+* Rewrote and maintains the build/release workflows and the self-updater: executables, automated/nightly/master releases, `--update-to`
+* Overhauled external downloader cookie handling
+* Added `--cookies-from-browser` support for Firefox containers
+* Overhauled and maintains support for sites like Youtube, Vimeo, Twitter, TikTok, etc
+* Added support for sites like Dacast, Kick, Loom, SproutVideo, Triller, Weverse, etc
+### [Grub4K](https://github.com/Grub4K)
 [![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/Grub4K) [![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/Grub4K)
@@ -63,8 +38,48 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
 * Improved/fixed/added Bundestag, crunchyroll, pr0gramm, Twitter, WrestleUniverse etc
-## [sepro](https://github.com/seproDev)
+### [sepro](https://github.com/seproDev)
 * UX improvements: Warn when ffmpeg is missing, warn when double-clicking exe
 * Code cleanup: Remove dead extractors, mark extractors as broken, enable/apply ruff rules
 * Improved/fixed/added ArdMediathek, DRTV, Floatplane, MagentaMusik, Naver, Nebula, OnDemandKorea, Vbox7 etc
+## Inactive Core Maintainers
+### [pukkandan](https://github.com/pukkandan)
+[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/pukkandan)
+[![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/pukkandan)
+* Founder of the fork
+* Lead Maintainer from 2021-2024
+### [shirt](https://github.com/shirt-dev)
+[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/shirt)
+* Multithreading (`-N`) and aria2c support for fragment downloads
+* Support for media initialization and discontinuity in HLS
+* The self-updater (`-U`)
+### [Ashish0804](https://github.com/Ashish0804)
+[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/ashish0804)
+* Added support for new websites BiliIntl, DiscoveryPlusIndia, OlympicsReplay, PlanetMarathi, ShemarooMe, Utreon, Zee5 etc
+* Added playlist/series downloads for Hotstar, ParamountPlus, Rumble, SonyLIV, Trovo, TubiTv, Voot etc
+* Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc
+## Triage Maintainers
+Triage Maintainers are frequent contributors who can manage issues and pull requests.
+- [gamer191](https://github.com/gamer191)
+- [garret1317](https://github.com/garret1317)
+- [pzhlkj6612](https://github.com/pzhlkj6612)
+- [DTrombett](https://github.com/dtrombett)
+- [doe1080](https://github.com/doe1080)
+- [grqz](https://github.com/grqz)

View File

@@ -157,7 +157,7 @@ yt-dlp.tar.gz: all
 --exclude '.git' \
 -- \
 README.md supportedsites.md Changelog.md LICENSE \
-CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
+CONTRIBUTING.md Maintainers.md CONTRIBUTORS AUTHORS \
 Makefile yt-dlp.1 README.txt completions .gitignore \
 yt-dlp yt_dlp pyproject.toml devscripts test

View File

@@ -5,7 +5,7 @@
 [![Release version](https://img.shields.io/github/v/release/yt-dlp/yt-dlp?color=brightgreen&label=Download&style=for-the-badge)](#installation "Installation")
 [![PyPI](https://img.shields.io/badge/-PyPI-blue.svg?logo=pypi&labelColor=555555&style=for-the-badge)](https://pypi.org/project/yt-dlp "PyPI")
-[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)](Collaborators.md#collaborators "Donate")
+[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)](Maintainers.md#maintainers "Donate")
 [![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)](https://discord.gg/H5MNcFW63r "Discord")
 [![Supported Sites](https://img.shields.io/badge/-Supported_Sites-brightgreen.svg?style=for-the-badge)](supportedsites.md "Supported Sites")
 [![License: Unlicense](https://img.shields.io/badge/-Unlicense-blue.svg?style=for-the-badge)](LICENSE "License")
@@ -194,7 +194,7 @@ When running a yt-dlp version that is older than 90 days, you will see a warning
 You can suppress this warning by adding `--no-update` to your command or configuration file.
 ## DEPENDENCIES
-Python versions 3.9+ (CPython) and 3.11+ (PyPy) are supported. Other versions and implementations may or may not work correctly.
+Python versions 3.10+ (CPython) and 3.11+ (PyPy) are supported. Other versions and implementations may or may not work correctly.
 <!-- Python 3.5+ uses VC++14 and it is already embedded in the binary created
 <!x-- https://www.microsoft.com/en-us/download/details.aspx?id=26999 --x>
@@ -273,7 +273,7 @@ On some systems, you may need to use `py` or `python` instead of `python3`.
 **Important**: Running `pyinstaller` directly **instead of** using `python -m bundle.pyinstaller` is **not** officially supported. This may or may not work correctly.
 ### Platform-independent Binary (UNIX)
-You will need the build tools `python` (3.9+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
+You will need the build tools `python` (3.10+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
 After installing these, simply run `make`.
@@ -1814,12 +1814,12 @@ The following extractors use this feature:
 #### youtube
 * `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube/_base.py](https://github.com/yt-dlp/yt-dlp/blob/415b4c9f955b1a0391204bd24a7132590e7b3bdb/yt_dlp/extractor/youtube/_base.py#L402-L409) for the list of supported content language codes
 * `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
-* `player_client`: Clients to extract video data from. The currently available clients are `web`, `web_safari`, `web_embedded`, `web_music`, `web_creator`, `mweb`, `ios`, `android`, `android_vr`, `tv`, `tv_simply` and `tv_embedded`. By default, `tv_simply,tv,web` is used, but `tv,web_safari,web` is used when authenticating with cookies and `tv,web_creator,web` is used with premium accounts. The `web_music` client is added for `music.youtube.com` URLs when logged-in cookies are used. The `web_embedded` client is added for age-restricted videos but only works if the video is embeddable. The `tv_embedded` and `web_creator` clients are added for age-restricted videos if account age-verification is required. Some clients, such as `web` and `web_music`, require a `po_token` for their formats to be downloadable. Some clients, such as `web_creator`, will only work with authentication. Not all clients support authentication via cookies. You can use `default` for the default clients, or you can use `all` for all clients (not recommended). You can prefix a client with `-` to exclude it, e.g. `youtube:player_client=default,-ios`
+* `player_client`: Clients to extract video data from. The currently available clients are `web`, `web_safari`, `web_embedded`, `web_music`, `web_creator`, `mweb`, `ios`, `android`, `android_sdkless`, `android_vr`, `tv`, `tv_simply` and `tv_embedded`. By default, `android_sdkless,tv,web_safari,web` is used. `android_sdkless` is omitted if cookies are passed. If premium cookies are passed, `tv,web_creator,web_safari,web` is used instead. The `web_music` client is added for `music.youtube.com` URLs when logged-in cookies are used. The `web_embedded` client is added for age-restricted videos but only works if the video is embeddable. The `tv_embedded` and `web_creator` clients are added for age-restricted videos if account age-verification is required. Some clients, such as `web` and `web_music`, require a `po_token` for their formats to be downloadable. Some clients, such as `web_creator`, will only work with authentication. Not all clients support authentication via cookies. You can use `default` for the default clients, or you can use `all` for all clients (not recommended). You can prefix a client with `-` to exclude it, e.g. `youtube:player_client=default,-ios`
 * `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player), `initial_data` (skip initial data/next ep request). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause issues such as missing formats or metadata. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) and [#12826](https://github.com/yt-dlp/yt-dlp/issues/12826) for more details
 * `webpage_skip`: Skip extraction of embedded webpage data. One or both of `player_response`, `initial_data`. These options are for testing purposes and don't skip any network requests
 * `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp.
 * `player_js_variant`: The player javascript variant to use for n/sig deciphering. The known variants are: `main`, `tcc`, `tce`, `es5`, `es6`, `tv`, `tv_es6`, `phone`, `tablet`. The default is `main`, and the others are for debugging purposes. You can use `actual` to go with what is prescribed by the site
-* `player_js_version`: The player javascript version to use for n/sig deciphering, in the format of `signature_timestamp@hash`. Currently, the default is to force `20348@0004de42`. You can use `actual` to go with what is prescribed by the site
+* `player_js_version`: The player javascript version to use for n/sig deciphering, in the format of `signature_timestamp@hash` (e.g. `20348@0004de42`). The default is to use what is prescribed by the site, and can be selected with `actual`
 * `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
 * `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`
 * E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total
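As an aside, the reworked `player_client` defaults above can also be exercised without the CLI: the `extractor_args` key of the embedding API mirrors the `--extractor-args` syntax. A sketch, with a placeholder video URL:

```python
import yt_dlp

# Equivalent of: yt-dlp --extractor-args "youtube:player_client=default,-ios" URL
ydl_opts = {
    'extractor_args': {'youtube': {'player_client': ['default', '-ios']}},
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    # download=False only resolves metadata/formats, which is enough to
    # inspect what the selected player clients returned
    info = ydl.extract_info('https://www.youtube.com/watch?v=VIDEO_ID', download=False)
    print(len(info['formats']), 'formats extracted')
```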
@@ -2255,7 +2255,7 @@ Features marked with a **\*** have been back-ported to youtube-dl
 Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:
-* yt-dlp supports only [Python 3.9+](## "Windows 8"), and will remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
+* yt-dlp supports only [Python 3.10+](## "Windows 8"), and will remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
 * The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
 * `avconv` is not supported as an alternative to `ffmpeg`
 * yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations

View File

@@ -298,5 +298,15 @@
         "action": "add",
         "when": "08d78996831bd8e1e3c2592d740c3def00bbf548",
         "short": "[priority] **Several options have been deprecated**\nIn order to simplify the codebase and reduce maintenance burden, various options have been deprecated. Please remove them from your commands/configurations. [Read more](https://github.com/yt-dlp/yt-dlp/issues/14198)"
+    },
+    {
+        "action": "add",
+        "when": "4e6a693057cfaf1ce1f07b019ed3bfce2bf936f6",
+        "short": "[priority] **The minimum *required* Python version has been raised to 3.10**\nPython 3.9 has reached its end-of-life as of October 2025, and yt-dlp has now removed support for it. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13858)"
+    },
+    {
+        "action": "add",
+        "when": "2c9091e355a7ba5d1edb69796ecdca48199b77fb",
+        "short": "[priority] **A stopgap release with a *TEMPORARY partial* fix for YouTube support**\nSome formats may still be unavailable, especially if cookies are passed to yt-dlp. The ***NEXT*** release, expected very soon, **will require an external JS runtime (e.g. Deno)** in order for YouTube downloads to work properly. [Read more](https://github.com/yt-dlp/yt-dlp/issues/14404)"
     }
 ]
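These override entries are consumed by `devscripts/make_changelog.py` when generating the changelog: roughly, an `add` action injects a synthetic priority note once the commit named in `when` falls inside the release's commit range. A simplified sketch of that idea (hypothetical code, not the script's actual logic):

```python
import json
from pathlib import Path

# Hypothetical reduction of how an "add" override might be applied
overrides = json.loads(Path('devscripts/changelog_override.json').read_text())
commits_in_range = {'4e6a693057cfaf1ce1f07b019ed3bfce2bf936f6'}  # example hash

for entry in overrides:
    if entry.get('action') == 'add' and entry.get('when') in commits_in_range:
        print(entry['short'].splitlines()[0])  # headline of the injected note
```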

View File

@@ -373,7 +373,7 @@ class CommitRange:
 issues = [issue.strip()[1:] for issue in issues.split(',')] if issues else []
 if prefix:
-groups, details, sub_details = zip(*map(self.details_from_prefix, prefix.split(',')))
+groups, details, sub_details = zip(*map(self.details_from_prefix, prefix.split(',')), strict=True)
 group = next(iter(filter(None, groups)), None)
 details = ', '.join(unique(details))
 sub_details = list(itertools.chain.from_iterable(sub_details))
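The `strict=True` keyword being added throughout these files is one of the Python 3.10 features the codebase can now rely on. For illustration:

```python
# zip() silently truncates to the shortest iterable by default;
# strict=True (new in Python 3.10) raises on a length mismatch instead.
print(list(zip('abc', [1, 2, 3], strict=True)))  # [('a', 1), ('b', 2), ('c', 3)]

try:
    list(zip('abc', [1, 2], strict=True))
except ValueError as exc:
    print(exc)  # zip() argument 2 is shorter than argument 1
```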

View File

@@ -4,8 +4,11 @@ build-backend = "hatchling.build"
 [project]
 name = "yt-dlp"
-maintainers = [
+authors = [
     {name = "pukkandan", email = "pukkandan.ytdlp@gmail.com"},
+]
+maintainers = [
+    {email = "maintainers@yt-dlp.org"},
     {name = "Grub4K", email = "contact@grub4k.xyz"},
     {name = "bashonly", email = "bashonly@protonmail.com"},
     {name = "coletdjnz", email = "coletdjnz@protonmail.com"},
@@ -13,7 +16,7 @@ maintainers = [
 ]
 description = "A feature-rich command-line audio/video downloader"
 readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
 keywords = [
     "cli",
     "downloader",
@@ -30,7 +33,6 @@ classifiers = [
     "Environment :: Console",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
@@ -76,7 +78,7 @@ dev = [
 ]
 static-analysis = [
     "autopep8~=2.0",
-    "ruff~=0.13.0",
+    "ruff~=0.14.0",
 ]
 test = [
     "pytest~=8.1",
@@ -90,7 +92,7 @@ pyinstaller = [
 Documentation = "https://github.com/yt-dlp/yt-dlp#readme"
 Repository = "https://github.com/yt-dlp/yt-dlp"
 Tracker = "https://github.com/yt-dlp/yt-dlp/issues"
-Funding = "https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators"
+Funding = "https://github.com/yt-dlp/yt-dlp/blob/master/Maintainers.md#maintainers"
 [project.scripts]
 yt-dlp = "yt_dlp:main"
@@ -168,7 +170,6 @@ run-cov = "echo Code coverage not implemented && exit 1"
 [[tool.hatch.envs.hatch-test.matrix]]
 python = [
-    "3.9",
     "3.10",
     "3.11",
     "3.12",

View File

@@ -85,7 +85,7 @@ The only reliable way to check if a site is supported is to try it.
 - **aol.com**: Yahoo screen and movies (**Currently broken**)
 - **APA**
 - **Aparat**
-- **AppleConnect**
+- **apple:music:connect**: Apple Music Connect
 - **AppleDaily**: 臺灣蘋果日報
 - **ApplePodcasts**
 - **appletrailers**
@@ -242,6 +242,7 @@
 - **Canalsurmas**
 - **CaracolTvPlay**: [*caracoltv-play*](## "netrc machine")
 - **cbc.ca**
+- **cbc.ca:listen**
 - **cbc.ca:player**
 - **cbc.ca:player:playlist**
 - **CBS**: (**Currently broken**)
@@ -579,6 +580,11 @@
 - **Hypem**
 - **Hytale**
 - **Icareus**
+- **IdagioAlbum**
+- **IdagioPersonalPlaylist**
+- **IdagioPlaylist**
+- **IdagioRecording**
+- **IdagioTrack**
 - **IdolPlus**
 - **iflix:episode**
 - **IflixSeries**
@@ -1535,7 +1541,7 @@
 - **tvigle**: Интернет-телевидение Tvigle.ru
 - **TVIPlayer**
 - **TVN24**: (**Currently broken**)
-- **TVNoe**: (**Currently broken**)
+- **tvnoe**: Televize Noe
 - **tvopengr:embed**: tvopen.gr embedded videos
 - **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
 - **tvp**: Telewizja Polska

View File

@@ -176,7 +176,7 @@ def _iter_differences(got, expected, field):
             yield field, f'expected length of {len(expected)}, got {len(got)}'
             return

-        for index, (got_val, expected_val) in enumerate(zip(got, expected)):
+        for index, (got_val, expected_val) in enumerate(zip(got, expected, strict=True)):
             field_name = str(index) if field is None else f'{field}.{index}'
             yield from _iter_differences(got_val, expected_val, field_name)
         return

View File

@@ -13,6 +13,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 import contextlib
 import copy
+import itertools
 import json

 from test.helper import FakeYDL, assertRegexpMatches, try_rm
@@ -414,7 +415,7 @@ class TestFormatSelection(unittest.TestCase):
         downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
         self.assertEqual(downloaded_ids, ['248+141'])

-        for f1, f2 in zip(formats_order, formats_order[1:]):
+        for f1, f2 in itertools.pairwise(formats_order):
             info_dict = _make_result([f1, f2], extractor='youtube')
             ydl = YDL({'format': 'best/bestvideo'})
             ydl.sort_formats(info_dict)
@@ -749,7 +750,7 @@ class TestYoutubeDL(unittest.TestCase):
             if not isinstance(expected, (list, tuple)):
                 expected = (expected, expected)
-            for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected):
+            for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected, strict=True):
                 if callable(expect):
                     self.assertTrue(expect(got), f'Wrong {name} from {tmpl}')
                 elif expect is not None:
@@ -1147,7 +1148,7 @@ class TestYoutubeDL(unittest.TestCase):
             entries = func(evaluated)
             results = [(v['playlist_autonumber'] - 1, (int(v['id']), v['playlist_index']))
                        for v in get_downloaded_info_dicts(params, entries)]
-            self.assertEqual(results, list(enumerate(zip(expected_ids, expected_ids))), f'Entries of {name} for {params}')
+            self.assertEqual(results, list(enumerate(zip(expected_ids, expected_ids, strict=True))), f'Entries of {name} for {params}')
             self.assertEqual(sorted(evaluated), expected_eval, f'Evaluation of {name} for {params}')

         test_selection({}, INDICES)

View File

@@ -115,7 +115,7 @@ class TestModifyChaptersPP(unittest.TestCase):
         self.assertEqual(len(ends), len(titles))
         start = 0
         chapters = []
-        for e, t in zip(ends, titles):
+        for e, t in zip(ends, titles, strict=True):
             chapters.append(self._chapter(start, e, t))
             start = e
         return chapters

View File

@@ -45,3 +45,8 @@ class TestGetWebPoContentBinding:
     def test_invalid_base64(self, pot_request):
         pot_request.visitor_data = 'invalid-base64'
         assert get_webpo_content_binding(pot_request, bind_to_visitor_id=True) == (pot_request.visitor_data, ContentBindingType.VISITOR_DATA)
+
+    def test_gvs_video_id_binding_experiment(self, pot_request):
+        pot_request.context = PoTokenContext.GVS
+        pot_request._gvs_bind_to_video_id = True
+        assert get_webpo_content_binding(pot_request) == ('example-video-id', ContentBindingType.VIDEO_ID)

View File

@@ -417,7 +417,7 @@ class TestTraversal:
     def test_traversal_morsel(self):
         morsel = http.cookies.Morsel()
-        values = dict(zip(morsel, 'abcdefghijklmnop'))
+        values = dict(zip(morsel, 'abcdefghijklmnop', strict=False))
         morsel.set('item_key', 'item_value', 'coded_value')
         morsel.update(values)
         values['key'] = 'item_key'

View File

@@ -1863,7 +1863,7 @@ Line 1
         self.assertEqual(
             list(get_elements_text_and_html_by_attribute('class', 'foo bar', html)),
-            list(zip(['nice', 'also nice'], self.GET_ELEMENTS_BY_CLASS_RES)))
+            list(zip(['nice', 'also nice'], self.GET_ELEMENTS_BY_CLASS_RES, strict=True)))
         self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'foo', html)), [])
         self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'no-such-foo', html)), [])

View File

@@ -2007,7 +2007,7 @@ class YoutubeDL:
             else:
                 entries = resolved_entries = list(entries)
                 n_entries = len(resolved_entries)
-            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
+            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries, strict=True)) or ([], [])
             if not ie_result.get('playlist_count'):
                 # Better to do this after potentially exhausting entries
                 ie_result['playlist_count'] = all_entries.get_full_count()
@@ -2785,7 +2785,7 @@ class YoutubeDL:
         dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
         for idx, (prev, current, next_) in enumerate(zip(
-                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
+                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter), strict=False), 1):
             if current.get('start_time') is None:
                 current['start_time'] = prev.get('end_time')
             if not current.get('end_time'):
@@ -3370,7 +3370,7 @@ class YoutubeDL:
         def existing_video_file(*filepaths):
             ext = info_dict.get('ext')
             converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
-            file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
+            file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths, strict=True)),
                                       default_overwrite=False)
             if file:
                 info_dict['ext'] = os.path.splitext(file)[1][1:]
@@ -3956,7 +3956,7 @@ class YoutubeDL:
     def render_subtitles_table(self, video_id, subtitles):
         def _row(lang, formats):
-            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
+            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)), strict=True)
             if len(set(names)) == 1:
                 names = [] if names[0] == 'unknown' else names[:1]
             return [lang, ', '.join(names), ', '.join(exts)]
@@ -4112,8 +4112,7 @@ class YoutubeDL:
                     self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)
             except CookieLoadError as error:
                 cause = error.__context__
-                # compat: <=py3.9: `traceback.format_exception` has a different signature
-                self.report_error(str(cause), tb=''.join(traceback.format_exception(None, cause, cause.__traceback__)))
+                self.report_error(str(cause), tb=''.join(traceback.format_exception(cause)))
                 raise

     @property

View File

@@ -1,8 +1,8 @@
 import sys

-if sys.version_info < (3, 9):
+if sys.version_info < (3, 10):
     raise ImportError(
-        f'You are using an unsupported version of Python. Only Python versions 3.9 and above are supported by yt-dlp')  # noqa: F541
+        f'You are using an unsupported version of Python. Only Python versions 3.10 and above are supported by yt-dlp')  # noqa: F541

 __license__ = 'The Unlicense'
@@ -155,7 +155,7 @@ def set_compat_opts(opts):
     if 'format-sort' in opts.compat_opts:
         opts.format_sort.extend(FormatSorter.ytdl_default)
     elif 'prefer-vp9-sort' in opts.compat_opts:
-        opts.format_sort.extend(FormatSorter._prefer_vp9_sort)
+        FormatSorter.default = FormatSorter._prefer_vp9_sort

     if 'mtime-by-default' in opts.compat_opts:
         if opts.updatetime is None:
@@ -974,13 +974,8 @@ def _real_main(argv=None):
         try:
             updater = Updater(ydl, opts.update_self)
-            if opts.update_self and updater.update() and actual_use:
-                if updater.cmd:
-                    return updater.restart()
-                # This code is reachable only for zip variant in py < 3.10
-                # It makes sense to exit here, but the old behavior is to continue
-                ydl.report_warning('Restart yt-dlp to use the updated version')
-                # return 100, 'ERROR: The program must exit for the update to complete'
+            if opts.update_self and updater.update() and actual_use and updater.cmd:
+                return updater.restart()
         except Exception:
             traceback.print_exc()
             ydl._download_retcode = 100

View File

@@ -447,7 +447,7 @@ def key_schedule_core(data, rcon_iteration):


 def xor(data1, data2):
-    return [x ^ y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2, strict=False)]


 def iter_mix_columns(data, matrix):

View File

@@ -1,13 +0,0 @@
-# flake8: noqa: F405
-from types import *  # noqa: F403
-
-from .compat_utils import passthrough_module
-
-passthrough_module(__name__, 'types')
-del passthrough_module
-
-try:
-    # NB: pypy has builtin NoneType, so checking NameError won't work
-    from types import NoneType  # >= 3.10
-except ImportError:
-    NoneType = type(None)

View File

@@ -22,15 +22,11 @@ if os.name == 'nt':
     def getproxies_registry_patched():
         proxies = getproxies_registry()

-        if (
-            sys.version_info >= (3, 10, 5)  # https://docs.python.org/3.10/whatsnew/changelog.html#python-3-10-5-final
-            or (3, 9, 13) <= sys.version_info < (3, 10)  # https://docs.python.org/3.9/whatsnew/changelog.html#python-3-9-13-final
-        ):
-            return proxies
-
-        for scheme in ('https', 'ftp'):
-            if scheme in proxies and proxies[scheme].startswith(f'{scheme}://'):
-                proxies[scheme] = 'http' + proxies[scheme][len(scheme):]
+        if sys.version_info < (3, 10, 5):  # https://docs.python.org/3.10/whatsnew/changelog.html#python-3-10-5-final
+            for scheme in ('https', 'ftp'):
+                if scheme in proxies and proxies[scheme].startswith(f'{scheme}://'):
+                    proxies[scheme] = 'http' + proxies[scheme][len(scheme):]

         return proxies

View File

@@ -337,6 +337,7 @@ from .cbc import (
     CBCGemIE,
     CBCGemLiveIE,
     CBCGemPlaylistIE,
+    CBCListenIE,
     CBCPlayerIE,
     CBCPlayerPlaylistIE,
 )
@@ -823,6 +824,13 @@ from .ichinanalive import (
     IchinanaLiveIE,
     IchinanaLiveVODIE,
 )
+from .idagio import (
+    IdagioAlbumIE,
+    IdagioPersonalPlaylistIE,
+    IdagioPlaylistIE,
+    IdagioRecordingIE,
+    IdagioTrackIE,
+)
 from .idolplus import IdolPlusIE
 from .ign import (
     IGNIE,

View File

@@ -21,7 +21,7 @@ from ..utils import (

 class ABCIE(InfoExtractor):
     IE_NAME = 'abc.net.au'
-    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/(?:news|btn)/(?:[^/]+/){1,4}(?P<id>\d{5,})'
+    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/(?:news|btn|listen)/(?:[^/?#]+/){1,4}(?P<id>\d{5,})'

     _TESTS = [{
         'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
@@ -53,8 +53,9 @@ class ABCIE(InfoExtractor):
         'info_dict': {
             'id': '6880080',
             'ext': 'mp3',
-            'title': 'NAB lifts interest rates, following Westpac and CBA',
+            'title': 'NAB lifts interest rates, following Westpac and CBA - ABC listen',
             'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
+            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/2193d7437c84b25eafd6360c82b5fa21',
         },
     }, {
         'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
@@ -64,8 +65,9 @@ class ABCIE(InfoExtractor):
         'info_dict': {
             'id': '10527914',
             'ext': 'mp4',
-            'title': 'WWI Centenary',
-            'description': 'md5:c2379ec0ca84072e86b446e536954546',
+            'title': 'WWI Centenary - Behind The News',
+            'description': 'md5:fa4405939ff750fade46ff0cd4c66a52',
+            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/bcc3433c97bf992dff32ec5a768713c9',
         },
     }, {
         'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
@@ -73,7 +75,8 @@ class ABCIE(InfoExtractor):
             'id': '12342074',
             'ext': 'mp4',
             'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
-            'description': 'md5:2961a17dc53abc558589ccd0fb8edd6f',
+            'description': 'md5:625257209f2d14ce23cb4e3785da9beb',
+            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/7ee6f190de6d7dbb04203e514bfae9ec',
         },
     }, {
         'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
@@ -93,7 +96,16 @@ class ABCIE(InfoExtractor):
             'title': 'Wagner Group retreating from Russia, leader Prigozhin to move to Belarus',
             'ext': 'mp4',
             'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
-            'thumbnail': 'https://live-production.wcms.abc-cdn.net.au/0c170f5b57f0105c432f366c0e8e267b?impolicy=wcms_crop_resize&cropH=2813&cropW=5000&xPos=0&yPos=249&width=862&height=485',
+            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/0c170f5b57f0105c432f366c0e8e267b',
+        },
+    }, {
+        'url': 'https://www.abc.net.au/listen/programs/the-followers-madness-of-two/presents-followers-madness-of-two/105697646',
+        'info_dict': {
+            'id': '105697646',
+            'title': 'INTRODUCING — The Followers: Madness of Two - ABC listen',
+            'ext': 'mp3',
+            'description': 'md5:2310cd0d440a4e01656abea15db8d1f3',
+            'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/90d7078214e5d66553ffb7fcf0da0cda',
         },
     }]

View File

@@ -1,47 +1,125 @@
+import time
+
 from .common import InfoExtractor
-from ..utils import ExtractorError, str_to_int
+from ..utils import (
+    ExtractorError,
+    extract_attributes,
+    float_or_none,
+    jwt_decode_hs256,
+    jwt_encode,
+    parse_resolution,
+    qualities,
+    unified_strdate,
+    update_url,
+    url_or_none,
+    urljoin,
+)
+from ..utils.traversal import (
+    find_element,
+    require,
+    traverse_obj,
+)


 class AppleConnectIE(InfoExtractor):
-    _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/(?:id)?sa\.(?P<id>[\w-]+)'
+    IE_NAME = 'apple:music:connect'
+    IE_DESC = 'Apple Music Connect'
+    _BASE_URL = 'https://music.apple.com'
+    _QUALITIES = {
+        'provisionalUploadVideo': None,
+        'sdVideo': 480,
+        'sdVideoWithPlusAudio': 480,
+        'sd480pVideo': 480,
+        '720pHdVideo': 720,
+        '1080pHdVideo': 1080,
+    }
+    _VALID_URL = r'https?://music\.apple\.com/[\w-]+/post/(?P<id>\d+)'
     _TESTS = [{
-        'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
-        'md5': 'c1d41f72c8bcaf222e089434619316e4',
+        'url': 'https://music.apple.com/us/post/1018290019',
         'info_dict': {
-            'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
+            'id': '1018290019',
             'ext': 'm4v',
             'title': 'Energy',
-            'uploader': 'Drake',
-            'thumbnail': r're:^https?://.*\.jpg$',
+            'duration': 177.911,
+            'thumbnail': r're:https?://.+\.png',
             'upload_date': '20150710',
-            'timestamp': 1436545535,
+            'uploader': 'Drake',
         },
     }, {
-        'url': 'https://itunes.apple.com/us/post/sa.0fe0229f-2457-11e5-9f40-1bb645f2d5d9',
-        'only_matching': True,
+        'url': 'https://music.apple.com/us/post/1016746627',
+        'info_dict': {
+            'id': '1016746627',
+            'ext': 'm4v',
+            'title': 'Body Shop (Madonna) - Chellous Lima (Acoustic Cover)',
+            'duration': 210.278,
+            'thumbnail': r're:https?://.+\.png',
+            'upload_date': '20150706',
+            'uploader': 'Chellous Lima',
+        },
     }]
+    _jwt = None
+
+    @staticmethod
+    def _jwt_is_expired(token):
+        return jwt_decode_hs256(token)['exp'] - time.time() < 120
+
+    def _get_token(self, webpage, video_id):
+        if self._jwt and not self._jwt_is_expired(self._jwt):
+            return self._jwt
+
+        js_url = traverse_obj(webpage, (
+            {find_element(tag='script', attr='crossorigin', value='', html=True)},
+            {extract_attributes}, 'src', {urljoin(self._BASE_URL)}, {require('JS URL')}))
+        js = self._download_webpage(
+            js_url, video_id, 'Downloading token JS', 'Unable to download token JS')
+        header = jwt_encode({}, '', headers={'alg': 'ES256', 'kid': 'WebPlayKid'}).split('.')[0]
+        self._jwt = self._search_regex(
+            fr'(["\'])(?P<jwt>{header}(?:\.[\w-]+){{2}})\1', js, 'JSON Web Token', group='jwt')
+        if self._jwt_is_expired(self._jwt):
+            raise ExtractorError('The fetched token is already expired')
+
+        return self._jwt

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

-        try:
-            video_json = self._html_search_regex(
-                r'class="auc-video-data">(\{.*?\})', webpage, 'json')
-        except ExtractorError:
-            raise ExtractorError('This post doesn\'t contain a video', expected=True)
-
-        video_data = self._parse_json(video_json, video_id)
-        timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp'))
-        like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count', default=None))
+        videos = self._download_json(
+            'https://amp-api.music.apple.com/v1/catalog/us/uploaded-videos',
+            video_id, headers={
+                'Authorization': f'Bearer {self._get_token(webpage, video_id)}',
+                'Origin': self._BASE_URL,
+            }, query={'ids': video_id, 'l': 'en-US'})
+        attributes = traverse_obj(videos, (
+            'data', ..., 'attributes', any, {require('video information')}))
+
+        formats = []
+        quality = qualities(list(self._QUALITIES.keys()))
+        for format_id, src_url in traverse_obj(attributes, (
+            'assetTokens', {dict.items}, lambda _, v: url_or_none(v[1]),
+        )):
+            formats.append({
+                'ext': 'm4v',
+                'format_id': format_id,
+                'height': self._QUALITIES.get(format_id),
+                'quality': quality(format_id),
+                'url': src_url,
+                **parse_resolution(update_url(src_url, query=None), lenient=True),
+            })

         return {
             'id': video_id,
-            'url': video_data['sslSrc'],
-            'title': video_data['title'],
-            'description': video_data['description'],
-            'uploader': video_data['artistName'],
-            'thumbnail': video_data['artworkUrl'],
-            'timestamp': timestamp,
-            'like_count': like_count,
+            'formats': formats,
+            'thumbnail': self._html_search_meta(
+                ['og:image', 'og:image:secure_url', 'twitter:image'], webpage),
+            **traverse_obj(attributes, {
+                'title': ('name', {str}),
+                'duration': ('durationInMilliseconds', {float_or_none(scale=1000)}),
+                'upload_date': ('uploadDate', {unified_strdate}),
+                'uploader': (('artistName', 'uploadingArtistName'), {str}, any),
+                'webpage_url': ('postUrl', {url_or_none}),
+            }),
         }

View File

@@ -740,7 +740,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
             note or 'Downloading CDX API JSON', query=query, fatal=fatal)
         if isinstance(res, list) and len(res) >= 2:
             # format response to make it easier to use
-            return [dict(zip(res[0], v)) for v in res[1:]]
+            return [dict(zip(res[0], v)) for v in res[1:]]  # noqa: B905
         elif not isinstance(res, list) or len(res) != 0:
             self.report_warning('Error while parsing CDX API response' + bug_reports_message())

View File

@@ -31,7 +31,7 @@ from ..utils.traversal import require, traverse_obj, trim_str

 class CBCIE(InfoExtractor):
     IE_NAME = 'cbc.ca'
-    _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?!player/)(?:[^/]+/)+(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?!player/|listen/|i/caffeine/syndicate/)(?:[^/?#]+/)+(?P<id>[^/?#]+)'
     _TESTS = [{
         # with mediaId
         'url': 'http://www.cbc.ca/22minutes/videos/clips-season-23/don-cherry-play-offs',
@@ -112,10 +112,6 @@ class CBCIE(InfoExtractor):
         'playlist_mincount': 6,
     }]

-    @classmethod
-    def suitable(cls, url):
-        return False if CBCPlayerIE.suitable(url) else super().suitable(url)
-
     def _extract_player_init(self, player_init, display_id):
         player_info = self._parse_json(player_init, display_id, js_to_json)
         media_id = player_info.get('mediaId')
@@ -913,3 +909,63 @@ class CBCGemLiveIE(InfoExtractor):
                 'thumbnail': ('images', 'card', 'url'),
             }),
         }
+
+
+class CBCListenIE(InfoExtractor):
+    IE_NAME = 'cbc.ca:listen'
+    _VALID_URL = r'https?://(?:www\.)?cbc\.ca/listen/(?:cbc-podcasts|live-radio)/[\w-]+/[\w-]+/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://www.cbc.ca/listen/cbc-podcasts/1353-the-naked-emperor/episode/16142603-introducing-understood-who-broke-the-internet',
+        'info_dict': {
+            'id': '16142603',
+            'title': 'Introducing Understood: Who Broke the Internet?',
+            'ext': 'mp3',
+            'description': 'md5:c605117500084e43f08a950adc6a708c',
+            'duration': 229,
+            'timestamp': 1745812800,
+            'release_timestamp': 1745827200,
+            'release_date': '20250428',
+            'upload_date': '20250428',
+        },
+    }, {
+        'url': 'https://www.cbc.ca/listen/live-radio/1-64-the-house/clip/16170773-should-canada-suck-stand-donald-trump',
+        'info_dict': {
+            'id': '16170773',
+            'title': 'Should Canada suck up or stand up to Donald Trump?',
+            'ext': 'mp3',
+            'description': 'md5:7385194f1cdda8df27ba3764b35e7976',
+            'duration': 3159,
+            'timestamp': 1758340800,
+            'release_timestamp': 1758254400,
+            'release_date': '20250919',
+            'upload_date': '20250920',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        response = self._download_json(
+            f'https://www.cbc.ca/listen/api/v1/clips/{video_id}', video_id, fatal=False)
+        data = traverse_obj(response, ('data', {dict}))
+        if not data:
+            self.report_warning('API failed to return data. Falling back to webpage parsing')
+            webpage = self._download_webpage(url, video_id)
+            preloaded_state = self._search_json(
+                r'window\.__PRELOADED_STATE__\s*=', webpage, 'preloaded state',
+                video_id, transform_source=js_to_json)
+            data = traverse_obj(preloaded_state, (
+                ('podcastDetailData', 'showDetailData'), ..., 'episodes',
+                lambda _, v: str(v['clipID']) == video_id, any, {require('episode data')}))
+
+        return {
+            'id': video_id,
+            **traverse_obj(data, {
+                'url': (('src', 'url'), {url_or_none}, any),
+                'title': ('title', {str}),
+                'description': ('description', {str}),
+                'release_timestamp': ('releasedAt', {int_or_none(scale=1000)}),
+                'timestamp': ('airdate', {int_or_none(scale=1000)}),
+                'duration': ('duration', {int_or_none}),
+            }),
+        }

View File

@@ -5,18 +5,6 @@ from ..utils import ExtractorError, make_archive_id, url_basename
 class CellebriteIE(VidyardBaseIE):
     _VALID_URL = r'https?://cellebrite\.com/(?:\w+)?/(?P<id>[\w-]+)'
     _TESTS = [{
-        'url': 'https://cellebrite.com/en/collect-data-from-android-devices-with-cellebrite-ufed/',
-        'info_dict': {
-            'id': 'ZqmUss3dQfEMGpauambPuH',
-            'display_id': '16025876',
-            'ext': 'mp4',
-            'title': 'Ask the Expert: Chat Capture - Collect Data from Android Devices in Cellebrite UFED',
-            'description': 'md5:dee48fe12bbae5c01fe6a053f7676da4',
-            'thumbnail': 'https://cellebrite.com/wp-content/uploads/2021/05/Chat-Capture-1024x559.png',
-            'duration': 455.979,
-            '_old_archive_ids': ['cellebrite 16025876'],
-        },
-    }, {
         'url': 'https://cellebrite.com/en/how-to-lawfully-collect-the-maximum-amount-of-data-from-android-devices/',
         'info_dict': {
             'id': 'QV1U8a2yzcxigw7VFnqKyg',

View File

@@ -1663,7 +1663,7 @@ class InfoExtractor:
                 'end_time': part.get('endOffset'),
             } for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
             for idx, (last_c, current_c, next_c) in enumerate(zip(
-                    [{'end_time': 0}, *chapters], chapters, chapters[1:])):
+                    [{'end_time': 0}, *chapters], chapters, chapters[1:], strict=False)):
                 current_c['end_time'] = current_c['end_time'] or next_c['start_time']
                 current_c['start_time'] = current_c['start_time'] or last_c['end_time']
                 if None in current_c.values():
@@ -1848,7 +1848,7 @@ class InfoExtractor:
             return {}
         args = dict(zip(arg_keys.split(','), map(json.dumps, self._parse_json(
-            f'[{arg_vals}]', video_id, transform_source=js_to_json, fatal=fatal) or ())))
+            f'[{arg_vals}]', video_id, transform_source=js_to_json, fatal=fatal) or ()), strict=True))
         ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
         return traverse_obj(ret, traverse) or {}

View File

@@ -18,15 +18,15 @@ from ..utils import (

 class DropoutIE(InfoExtractor):
-    _LOGIN_URL = 'https://www.dropout.tv/login'
+    _LOGIN_URL = 'https://watch.dropout.tv/login'
     _NETRC_MACHINE = 'dropout'
-    _VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?:[^/]+/)*videos/(?P<id>[^/]+)/?$'
+    _VALID_URL = r'https?://(?:watch\.)?dropout\.tv/(?:[^/?#]+/)*videos/(?P<id>[^/?#]+)/?(?:[?#]|$)'
     _TESTS = [
         {
-            'url': 'https://www.dropout.tv/game-changer/season:2/videos/yes-or-no',
+            'url': 'https://watch.dropout.tv/game-changer/season:2/videos/yes-or-no',
             'note': 'Episode in a series',
-            'md5': '5e000fdfd8d8fa46ff40456f1c2af04a',
+            'md5': '4b76963f904f8bc4ba22dcf0e66ada06',
             'info_dict': {
                 'id': '738153',
                 'display_id': 'yes-or-no',
@@ -45,35 +45,35 @@ class DropoutIE(InfoExtractor):
                 'uploader_url': 'https://vimeo.com/user80538407',
                 'uploader': 'OTT Videos',
             },
-            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest', 'Failed to parse XML: not well-formed'],
         },
         {
-            'url': 'https://www.dropout.tv/dimension-20-fantasy-high/season:1/videos/episode-1',
+            'url': 'https://watch.dropout.tv/tablepop-presents-megadungeon-live/season:1/videos/enter-through-the-gift-shop',
             'note': 'Episode in a series (missing release_date)',
-            'md5': '712caf7c191f1c47c8f1879520c2fa5c',
+            'md5': 'b08fb03050585ea25cd7ee092db9134c',
             'info_dict': {
-                'id': '320562',
-                'display_id': 'episode-1',
+                'id': '624270',
+                'display_id': 'enter-through-the-gift-shop',
                 'ext': 'mp4',
-                'title': 'The Beginning Begins',
-                'description': 'The cast introduces their PCs, including a neurotic elf, a goblin PI, and a corn-worshipping cleric.',
-                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/4421ed0d-f630-4c88-9004-5251b2b8adfa.jpg',
-                'series': 'Dimension 20: Fantasy High',
+                'title': 'Enter Through the Gift Shop',
+                'description': 'A new adventuring party explores a gift shop and runs into a friendly orc -- and some angry goblins.',
+                'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/a1d876c3-3dee-4cd0-87c6-27a851b1d0ec.jpg',
+                'series': 'TablePop Presents: MEGADUNGEON LIVE!',
                 'season_number': 1,
                 'season': 'Season 1',
                 'episode_number': 1,
-                'episode': 'The Beginning Begins',
-                'duration': 6838,
+                'episode': 'Enter Through the Gift Shop',
+                'duration': 7101,
                 'uploader_id': 'user80538407',
                 'uploader_url': 'https://vimeo.com/user80538407',
                 'uploader': 'OTT Videos',
             },
-            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest', 'Failed to parse XML: not well-formed'],
         },
         {
-            'url': 'https://www.dropout.tv/videos/misfits-magic-holiday-special',
+            'url': 'https://watch.dropout.tv/videos/misfits-magic-holiday-special',
             'note': 'Episode not in a series',
-            'md5': 'c30fa18999c5880d156339f13c953a26',
+            'md5': '1e6428f7756b02c93b573d39ddd789fe',
             'info_dict': {
                 'id': '1915774',
                 'display_id': 'misfits-magic-holiday-special',
@@ -87,7 +87,7 @@ class DropoutIE(InfoExtractor):
                 'uploader_url': 'https://vimeo.com/user80538407',
                 'uploader': 'OTT Videos',
             },
-            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
+            'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest', 'Failed to parse XML: not well-formed'],
         },
     ]
@@ -125,7 +125,7 @@ class DropoutIE(InfoExtractor):
         display_id = self._match_id(url)

         webpage = None
-        if self._get_cookies('https://www.dropout.tv').get('_session'):
+        if self._get_cookies('https://watch.dropout.tv').get('_session'):
             webpage = self._download_webpage(url, display_id)
         if not webpage or '<div id="watch-unauthorized"' in webpage:
             login_err = self._login(display_id)
@@ -148,7 +148,7 @@ class DropoutIE(InfoExtractor):
         return {
             '_type': 'url_transparent',
             'ie_key': VHXEmbedIE.ie_key(),
-            'url': VHXEmbedIE._smuggle_referrer(embed_url, 'https://www.dropout.tv'),
+            'url': VHXEmbedIE._smuggle_referrer(embed_url, 'https://watch.dropout.tv'),
             'id': self._search_regex(r'embed\.vhx\.tv/videos/(.+?)\?', embed_url, 'id'),
             'display_id': display_id,
             'title': title,
@@ -167,10 +167,10 @@ class DropoutIE(InfoExtractor):
 class DropoutSeasonIE(InfoExtractor):
     _PAGE_SIZE = 24
-    _VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?P<id>[^\/$&?#]+)(?:/?$|/season:(?P<season>[0-9]+)/?$)'
+    _VALID_URL = r'https?://(?:watch\.)?dropout\.tv/(?P<id>[^\/$&?#]+)(?:/?$|/season:(?P<season>[0-9]+)/?$)'
     _TESTS = [
         {
-            'url': 'https://www.dropout.tv/dimension-20-fantasy-high/season:1',
+            'url': 'https://watch.dropout.tv/dimension-20-fantasy-high/season:1',
             'note': 'Multi-season series with the season in the url',
             'playlist_count': 24,
             'info_dict': {
@@ -179,7 +179,7 @@ class DropoutSeasonIE(InfoExtractor):
             },
         },
         {
-            'url': 'https://www.dropout.tv/dimension-20-fantasy-high',
+            'url': 'https://watch.dropout.tv/dimension-20-fantasy-high',
             'note': 'Multi-season series with the season not in the url',
             'playlist_count': 24,
             'info_dict': {
@@ -188,7 +188,7 @@ class DropoutSeasonIE(InfoExtractor):
             },
         },
         {
-            'url': 'https://www.dropout.tv/dimension-20-shriek-week',
+            'url': 'https://watch.dropout.tv/dimension-20-shriek-week',
             'note': 'Single-season series',
             'playlist_count': 4,
             'info_dict': {
@@ -197,7 +197,7 @@ class DropoutSeasonIE(InfoExtractor):
             },
         },
         {
-            'url': 'https://www.dropout.tv/breaking-news-no-laugh-newsroom/season:3',
+            'url': 'https://watch.dropout.tv/breaking-news-no-laugh-newsroom/season:3',
             'note': 'Multi-season series with season in the url that requires pagination',
             'playlist_count': 25,
             'info_dict': {

View File

@@ -1,5 +1,4 @@
 import json
-import socket

 from .common import InfoExtractor
 from ..utils import (
@@ -56,7 +55,7 @@ class DTubeIE(InfoExtractor):
             try:
                 self.to_screen(f'{video_id}: Checking {format_id} video format URL')
                 self._downloader._opener.open(video_url, timeout=5).close()
-            except socket.timeout:
+            except TimeoutError:
                 self.to_screen(
                     f'{video_id}: {format_id} URL is invalid, skipping')
                 continue

View File

@@ -56,7 +56,7 @@ class FujiTVFODPlus7IE(InfoExtractor):
                 fmt, subs = self._extract_m3u8_formats_and_subtitles(src['url'], video_id, 'ts')
                 for f in fmt:
                     f.update(dict(zip(('height', 'width'),
-                        self._BITRATE_MAP.get(f.get('tbr'), ()))))
+                        self._BITRATE_MAP.get(f.get('tbr'), ()), strict=False)))
                 formats.extend(fmt)
                 subtitles = self._merge_subtitles(subtitles, subs)

yt_dlp/extractor/idagio.py (new file, +262 lines)
View File

@@ -0,0 +1,262 @@
+from .common import InfoExtractor
+from ..utils import int_or_none, unified_timestamp, url_or_none
+from ..utils.traversal import traverse_obj
+
+
+class IdagioTrackIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/recordings/\d+\?(?:[^#]+&)?trackId=(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://app.idagio.com/recordings/30576934?trackId=30576943',
+        'md5': '15148bd71804b2450a2508931a116b56',
+        'info_dict': {
+            'id': '30576943',
+            'ext': 'mp3',
+            'title': 'Theme. Andante',
+            'duration': 82,
+            'composers': ['Edward Elgar'],
+            'artists': ['Vasily Petrenko', 'Royal Liverpool Philharmonic Orchestra'],
+            'genres': ['Orchestral', 'Other Orchestral Music'],
+            'track': 'Theme. Andante',
+            'timestamp': 1554474370,
+            'upload_date': '20190405',
+        },
+    }, {
+        'url': 'https://app.idagio.com/recordings/20514467?trackId=20514478&utm_source=pcl',
+        'md5': '3acef2ea0feadf889123b70e5a1e7fa7',
+        'info_dict': {
+            'id': '20514478',
+            'ext': 'mp3',
+            'title': 'I. Adagio sostenuto',
+            'duration': 316,
+            'composers': ['Ludwig van Beethoven'],
+            'genres': ['Keyboard', 'Sonata (Keyboard)'],
+            'track': 'I. Adagio sostenuto',
+            'timestamp': 1518076337,
+            'upload_date': '20180208',
+        },
+    }, {
+        'url': 'https://app.idagio.com/de/recordings/20514467?trackId=20514478&utm_source=pcl',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        track_id = self._match_id(url)
+        track_info = self._download_json(
+            f'https://api.idagio.com/v2.0/metadata/tracks/{track_id}',
+            track_id, fatal=False, expected_status=406)
+        if traverse_obj(track_info, 'error_code') == 'idagio.error.blocked.location':
+            self.raise_geo_restricted()
+
+        content_info = self._download_json(
+            f'https://api.idagio.com/v1.8/content/track/{track_id}', track_id,
+            query={
+                'quality': '0',
+                'format': '2',
+                'client_type': 'web-4',
+            })
+
+        return {
+            'ext': 'mp3',
+            'vcodec': 'none',
+            'id': track_id,
+            'url': traverse_obj(content_info, ('url', {url_or_none})),
+            **traverse_obj(track_info, ('result', {
+                'title': ('piece', 'title', {str}),
+                'timestamp': ('recording', 'created_at', {int_or_none(scale=1000)}),
+                'location': ('recording', 'location', {str}),
+                'duration': ('duration', {int_or_none}),
+                'track': ('piece', 'title', {str}),
+                'artists': ('recording', ('conductor', ('ensembles', ...), ('soloists', ...)), 'name', {str}, filter),
+                'composers': ('piece', 'workpart', 'work', 'composer', 'name', {str}, filter, all, filter),
+                'genres': ('piece', 'workpart', 'work', ('genre', 'subgenre'), 'title', {str}, filter),
+            })),
+        }
+
+
+class IdagioPlaylistBaseIE(InfoExtractor):
+    """Subclasses must set _API_URL_TMPL and define _parse_playlist_metadata"""
+    _PLAYLIST_ID_KEY = 'id'  # vs. 'display_id'
+
+    def _entries(self, playlist_info):
+        for track_data in traverse_obj(playlist_info, ('tracks', lambda _, v: v['id'] and v['recording']['id'])):
+            track_id = track_data['id']
+            recording_id = track_data['recording']['id']
+            yield self.url_result(
+                f'https://app.idagio.com/recordings/{recording_id}?trackId={track_id}',
+                ie=IdagioTrackIE, video_id=track_id)
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        playlist_info = self._download_json(
+            self._API_URL_TMPL.format(playlist_id), playlist_id)['result']
+
+        return {
+            '_type': 'playlist',
+            self._PLAYLIST_ID_KEY: playlist_id,
+            'entries': self._entries(playlist_info),
+            **self._parse_playlist_metadata(playlist_info),
+        }
+
+
+class IdagioRecordingIE(IdagioPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/recordings/(?P<id>\d+)(?![^#]*[&?]trackId=\d+)'
+    _TESTS = [{
+        'url': 'https://app.idagio.com/recordings/30576934',
+        'info_dict': {
+            'id': '30576934',
+            'title': 'Variations on an Original Theme op. 36',
+            'composers': ['Edward Elgar'],
+            'artists': ['Vasily Petrenko', 'Royal Liverpool Philharmonic Orchestra'],
+            'genres': ['Orchestral', 'Other Orchestral Music'],
+            'timestamp': 1554474370,
+            'modified_timestamp': 1554474370,
+            'modified_date': '20190405',
+            'upload_date': '20190405',
+        },
+        'playlist_count': 15,
+    }, {
+        'url': 'https://app.idagio.com/de/recordings/20514467',
+        'info_dict': {
+            'id': '20514467',
+            'title': 'Sonata for Piano No. 14 in C sharp minor op. 27/2',
+            'composers': ['Ludwig van Beethoven'],
+            'genres': ['Keyboard', 'Sonata (Keyboard)'],
+            'timestamp': 1518076337,
+            'upload_date': '20180208',
+            'modified_timestamp': 1518076337,
+            'modified_date': '20180208',
+        },
+        'playlist_count': 3,
+    }]
+    _API_URL_TMPL = 'https://api.idagio.com/v2.0/metadata/recordings/{}'
+
+    def _parse_playlist_metadata(self, playlist_info):
+        return traverse_obj(playlist_info, {
+            'title': ('work', 'title', {str}),
+            'timestamp': ('created_at', {int_or_none(scale=1000)}),
+            'modified_timestamp': ('created_at', {int_or_none(scale=1000)}),
+            'location': ('location', {str}),
+            'artists': (('conductor', ('ensembles', ...), ('soloists', ...)), 'name', {str}),
+            'composers': ('work', 'composer', 'name', {str}, all),
+            'genres': ('work', ('genre', 'subgenre'), 'title', {str}),
+            'tags': ('tags', ..., {str}),
+        })
+
+
+class IdagioAlbumIE(IdagioPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/albums/(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://app.idagio.com/albums/elgar-enigma-variations-in-the-south-serenade-for-strings',
+        'info_dict': {
+            'id': 'a9f139b8-f70d-4b8a-a9a4-5fe8d35eaf9c',
+            'display_id': 'elgar-enigma-variations-in-the-south-serenade-for-strings',
+            'title': 'Elgar: Enigma Variations, In the South, Serenade for Strings',
+            'description': '',
+            'thumbnail': r're:https://.+/albums/880040420521/main\.jpg',
+            'artists': ['Vasily Petrenko', 'Royal Liverpool Philharmonic Orchestra', 'Edward Elgar'],
+            'timestamp': 1553817600,
+            'upload_date': '20190329',
+            'modified_timestamp': 1562566559.0,
+            'modified_date': '20190708',
+        },
+        'playlist_count': 19,
+    }, {
+        'url': 'https://app.idagio.com/de/albums/brahms-ein-deutsches-requiem-3B403DF6-62D7-4A42-807B-47173F3E0192',
+        'info_dict': {
+            'id': '2862ad4e-4a61-45ad-9ce4-7fcf0c2626fe',
+            'display_id': 'brahms-ein-deutsches-requiem-3B403DF6-62D7-4A42-807B-47173F3E0192',
+            'title': 'Brahms: Ein deutsches Requiem',
+            'description': 'GRAMOPHONE CLASSICAL MUSIC AWARDS 2025 Recording of the Year & Choral',
+            'thumbnail': r're:https://.+/albums/3149020954522/main\.jpg',
+            'artists': ['Sabine Devieilhe', 'Stéphane Degout', 'Raphaël Pichon', 'Pygmalion', 'Johannes Brahms'],
+            'timestamp': 1760054400,
+            'upload_date': '20251010',
+            'modified_timestamp': 1760624868,
+            'modified_date': '20251016',
+            'tags': ['recommended', 'recent-release'],
+        },
+        'playlist_count': 7,
+    }]
+    _API_URL_TMPL = 'https://api.idagio.com/v2.0/metadata/albums/{}'
+    _PLAYLIST_ID_KEY = 'display_id'
+
+    def _parse_playlist_metadata(self, playlist_info):
+        return traverse_obj(playlist_info, {
+            'id': ('id', {str}),
+            'title': ('title', {str}),
+            'timestamp': ('publishDate', {unified_timestamp}),
+            'modified_timestamp': ('lastModified', {unified_timestamp}),
+            'thumbnail': ('imageUrl', {url_or_none}),
+            'description': ('description', {str}),
+            'artists': ('participants', ..., 'name', {str}),
+            'tags': ('tags', ..., {str}),
+        })
+
+
+class IdagioPlaylistIE(IdagioPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/playlists/(?!personal/)(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://app.idagio.com/playlists/beethoven-the-most-beautiful-piano-music',
+        'info_dict': {
+            'id': '31652bec-8c5b-460e-a3f0-cf1f69817f53',
+            'display_id': 'beethoven-the-most-beautiful-piano-music',
+            'title': 'Beethoven: the most beautiful piano music',
+            'description': 'md5:d41bb04b8896bb69377f5c2cd9345ad1',
+            'thumbnail': r're:https://.+/playlists/31652bec-8c5b-460e-a3f0-cf1f69817f53/main\.jpg',
+            'creators': ['IDAGIO'],
+        },
+        'playlist_mincount': 16,  # one entry is geo-restricted
+    }, {
+        'url': 'https://app.idagio.com/de/playlists/piano-music-for-an-autumn-day',
+        'info_dict': {
+            'id': 'd70e9c7f-7080-4308-ae0f-f890dddeda82',
+            'display_id': 'piano-music-for-an-autumn-day',
+            'title': 'Piano Music for an Autumn Day',
+            'description': 'Get ready to snuggle up and enjoy all the musical colours of this cosy, autumnal playlist.',
+            'thumbnail': r're:https://.+/playlists/d70e9c7f-7080-4308-ae0f-f890dddeda82/main\.jpg',
+            'creators': ['IDAGIO'],
+        },
+        'playlist_count': 35,
+    }]
+    _API_URL_TMPL = 'https://api.idagio.com/v2.0/playlists/{}'
+    _PLAYLIST_ID_KEY = 'display_id'
+
+    def _parse_playlist_metadata(self, playlist_info):
+        return traverse_obj(playlist_info, {
+            'id': ('id', {str}),
+            'title': ('title', {str}),
+            'thumbnail': ('imageUrl', {url_or_none}),
+            'description': ('description', {str}),
+            'creators': ('curator', 'name', {str}, all),
+        })
+
+
+class IdagioPersonalPlaylistIE(IdagioPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?app\.idagio\.com(?:/[a-z]{2})?/playlists/personal/(?P<id>[\da-f-]+)'
+    _TESTS = [{
+        'url': 'https://app.idagio.com/playlists/personal/99dad72e-7b3a-45a4-b216-867c08046ed8',
+        'info_dict': {
+            'id': '99dad72e-7b3a-45a4-b216-867c08046ed8',
+            'title': 'Test',
+            'creators': ['1a6f16a6-4514-4d0c-b481-3a9877835626'],
+            'thumbnail': r're:https://.+/artists/86371/main\.jpg',
+            'timestamp': 1602859138,
+            'modified_timestamp': 1755616667,
+            'upload_date': '20201016',
+            'modified_date': '20250819',
+        },
+        'playlist_count': 100,
+    }, {
+        'url': 'https://app.idagio.com/de/playlists/personal/99dad72e-7b3a-45a4-b216-867c08046ed8',
+        'only_matching': True,
+    }]
+    _API_URL_TMPL = 'https://api.idagio.com/v1.0/personal-playlists/{}'
+
+    def _parse_playlist_metadata(self, playlist_info):
+        return traverse_obj(playlist_info, {
+            'title': ('title', {str}),
+            'thumbnail': ('image_url', {url_or_none}),
+            'creators': ('user_id', {str}, all),
+            'timestamp': ('created_at', {int_or_none(scale=1000)}),
+            'modified_timestamp': ('updated_at', {int_or_none(scale=1000)}),
+        })

View File

@@ -437,7 +437,7 @@ class KalturaIE(InfoExtractor):
             params = urllib.parse.parse_qs(query)
         if path:
             splitted_path = path.split('/')
-            params.update(dict(zip(splitted_path[::2], [[v] for v in splitted_path[1::2]])))
+            params.update(dict(zip(splitted_path[::2], [[v] for v in splitted_path[1::2]])))  # noqa: B905
         if 'wid' in params:
             partner_id = remove_start(params['wid'][0], '_')
         elif 'p' in params:

View File

@@ -1,3 +1,4 @@
+import itertools
 import re
 import urllib.parse

@@ -216,7 +217,7 @@ class LyndaIE(LyndaBaseIE):
     def _fix_subtitles(self, subs):
         srt = ''
         seq_counter = 0
-        for seq_current, seq_next in zip(subs, subs[1:]):
+        for seq_current, seq_next in itertools.pairwise(subs):
             m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
             if m_current is None:
                 continue

View File

@@ -92,7 +92,7 @@ class MojevideoIE(InfoExtractor):
             contains_pattern=r'\[(?s:.+)\]', transform_source=js_to_json)

         formats = []
-        for video_hash, (suffix, quality, format_note) in zip(video_hashes, [
+        for video_hash, (suffix, quality, format_note) in zip(video_hashes, [  # noqa: B905
             ('', 1, 'normálna kvalita'),
             ('_lq', 0, 'nízka kvalita'),
             ('_hd', 2, 'HD-720p'),

View File

@@ -1,3 +1,5 @@
+import hashlib
+
 from .common import InfoExtractor

@@ -9,10 +11,10 @@ class MuseScoreIE(InfoExtractor):
         'id': '142975',
         'ext': 'mp3',
         'title': 'WA Mozart Marche Turque (Turkish March fingered)',
-        'description': 'md5:7ede08230e4eaabd67a4a98bb54d07be',
-        'thumbnail': r're:https?://(?:www\.)?musescore\.com/.*\.png[^$]+',
+        'description': 'md5:0ca4cf6b79d7f5868a1fee74097394ab',
+        'thumbnail': r're:https?://cdn\.ustatik\.com/musescore/.*\.jpg',
         'uploader': 'PapyPiano',
-        'creator': 'Wolfgang Amadeus Mozart',
+        'creators': ['Wolfgang Amadeus Mozart'],
     },
 }, {
     'url': 'https://musescore.com/user/36164500/scores/6837638',
@@ -20,10 +22,10 @@ class MuseScoreIE(InfoExtractor):
         'id': '6837638',
         'ext': 'mp3',
         'title': 'Sweet Child O\' Mine  Guns N\' Roses sweet child',
-        'description': 'md5:4dca71191c14abc312a0a4192492eace',
-        'thumbnail': r're:https?://(?:www\.)?musescore\.com/.*\.png[^$]+',
+        'description': 'md5:2cd49bd6b4e48a75a3c469d4775d5079',
+        'thumbnail': r're:https?://cdn\.ustatik\.com/musescore/.*\.png',
         'uploader': 'roxbelviolin',
-        'creator': 'Guns N´Roses Arr. Roxbel Violin',
+        'creators': ['Guns N´Roses Arr. Roxbel Violin'],
     },
 }, {
     'url': 'https://musescore.com/classicman/fur-elise',
@@ -31,22 +33,28 @@ class MuseScoreIE(InfoExtractor):
         'id': '33816',
         'ext': 'mp3',
         'title': 'Für Elise  Beethoven',
-        'description': 'md5:49515a3556d5ecaf9fa4b2514064ac34',
-        'thumbnail': r're:https?://(?:www\.)?musescore\.com/.*\.png[^$]+',
+        'description': 'md5:e37b241c0280b33e9ac25651b815d06e',
+        'thumbnail': r're:https?://cdn\.ustatik\.com/musescore/.*\.jpg',
         'uploader': 'ClassicMan',
-        'creator': 'Ludwig van Beethoven (1770–1827)',
+        'creators': ['Ludwig van Beethoven (1770–1827)'],
     },
 }, {
     'url': 'https://musescore.com/minh_cuteee/scores/6555384',
     'only_matching': True,
 }]

+    @staticmethod
+    def _generate_auth_token(video_id):
+        return hashlib.md5((video_id + 'mp30gs').encode()).hexdigest()[:4]
+
     def _real_extract(self, url):
         webpage = self._download_webpage(url, None)
         url = self._og_search_url(webpage) or url
         video_id = self._match_id(url)
-        mp3_url = self._download_json(f'https://musescore.com/api/jmuse?id={video_id}&index=0&type=mp3&v2=1', video_id,
-            headers={'authorization': '63794e5461e4cfa046edfbdddfccc1ac16daffd2'})['info']['url']
+        mp3_url = self._download_json(
+            'https://musescore.com/api/jmuse', video_id,
+            headers={'authorization': self._generate_auth_token(video_id)},
+            query={'id': video_id, 'index': '0', 'type': 'mp3'})['info']['url']
         formats = [{
             'url': mp3_url,
             'ext': 'mp3',
@@ -57,7 +65,7 @@ class MuseScoreIE(InfoExtractor):
             'id': video_id,
             'formats': formats,
             'title': self._og_search_title(webpage),
-            'description': self._og_search_description(webpage),
+            'description': self._html_search_meta('description', webpage, 'description'),
             'thumbnail': self._og_search_thumbnail(webpage),
             'uploader': self._html_search_meta('musescore:author', webpage, 'uploader'),
             'creator': self._html_search_meta('musescore:composer', webpage, 'composer'),

View File

@@ -503,7 +503,7 @@ class NhkForSchoolBangumiIE(InfoExtractor):
             'start_time': s,
             'end_time': e,
             'title': t,
-        } for s, e, t in zip(start_time, end_time, chapter_titles)]
+        } for s, e, t in zip(start_time, end_time, chapter_titles, strict=True)]

         return {
             'id': video_id,

View File

@@ -181,7 +181,7 @@ class PBSIE(InfoExtractor):
     )

     IE_NAME = 'pbs'
-    IE_DESC = 'Public Broadcasting Service (PBS) and member stations: {}'.format(', '.join(list(zip(*_STATIONS))[1]))
+    IE_DESC = 'Public Broadcasting Service (PBS) and member stations: {}'.format(', '.join(list(zip(*_STATIONS, strict=True))[1]))

     _VALID_URL = r'''(?x)https?://
         (?:
@@ -193,7 +193,7 @@ class PBSIE(InfoExtractor):
            (?:[^/?#]+/){{1,5}}(?P<presumptive_id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])
         )
     )
-    '''.format('|'.join(next(zip(*_STATIONS))))
+    '''.format('|'.join(next(zip(*_STATIONS, strict=True))))

     _GEO_COUNTRIES = ['US']

View File

@@ -405,7 +405,7 @@ class PolskieRadioCategoryIE(InfoExtractor):
             tab_content = self._download_json(
                 'https://www.polskieradio.pl/CMS/TemplateBoxesManagement/TemplateBoxTabContent.aspx/GetTabContent',
                 category_id, f'Downloading page {page_num}', headers={'content-type': 'application/json'},
-                data=json.dumps(dict(zip((
+                data=json.dumps(dict(zip((  # noqa: B905
                     'boxInstanceId', 'tabId', 'categoryType', 'sectionId', 'categoryId', 'pagerMode',
                     'subjectIds', 'tagIndexId', 'queryString', 'name', 'openArticlesInParentTemplate',
                     'idSectionFromUrl', 'maxDocumentAge', 'showCategoryForArticle', 'pageNumber',

View File

@@ -155,7 +155,7 @@ class Pr0grammIE(InfoExtractor):
         # Sorted by "confidence", higher confidence = earlier in list
         confidences = traverse_obj(metadata, ('tags', ..., 'confidence', ({int}, {float})))
         if confidences:
-            tags = [tag for _, tag in sorted(zip(confidences, tags), reverse=True)]
+            tags = [tag for _, tag in sorted(zip(confidences, tags), reverse=True)]  # noqa: B905

         formats = traverse_obj(video_info, ('variants', ..., {
             'format_id': ('name', {str}),

View File

@@ -1,8 +1,8 @@
 import json

 from .common import InfoExtractor
-from ..utils import float_or_none, parse_iso8601, str_or_none, try_call
-from ..utils.traversal import traverse_obj
+from ..utils import float_or_none, parse_iso8601, str_or_none, try_call, url_or_none
+from ..utils.traversal import traverse_obj, value


 class PrankCastIE(InfoExtractor):
@@ -100,9 +100,38 @@ class PrankCastPostIE(InfoExtractor):
             'duration': 263.287,
             'cast': ['despicabledogs'],
             'description': 'https://imgur.com/a/vtxLvKU',
-            'categories': [],
             'upload_date': '20240104',
         },
+    }, {
+        'url': 'https://prankcast.com/drtomservo/posts/11988-butteye-s-late-night-stank-episode-1-part-1-',
+        'info_dict': {
+            'id': '11988',
+            'ext': 'mp3',
+            'title': 'Butteye\'s Late Night Stank Episode 1 (Part 1)',
+            'display_id': 'butteye-s-late-night-stank-episode-1-part-1-',
+            'timestamp': 1754238686,
+            'uploader': 'DrTomServo',
+            'channel_id': '136',
+            'duration': 2176.464,
+            'cast': ['DrTomServo'],
+            'description': '',
+            'upload_date': '20250803',
+        },
+    }, {
+        'url': 'https://prankcast.com/drtomservo/posts/12105-butteye-s-late-night-stank-episode-08-16-2025-part-2',
+        'info_dict': {
+            'id': '12105',
+            'ext': 'mp3',
+            'title': 'Butteye\'s Late Night Stank Episode 08-16-2025 Part 2',
+            'display_id': 'butteye-s-late-night-stank-episode-08-16-2025-part-2',
+            'timestamp': 1755453505,
+            'uploader': 'DrTomServo',
+            'channel_id': '136',
+            'duration': 19018.392,
+            'cast': ['DrTomServo'],
+            'description': '',
+            'upload_date': '20250817',
+        },
     }]

     def _real_extract(self, url):
@@ -112,26 +141,28 @@ class PrankCastPostIE(InfoExtractor):
         post = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['ssr_data_posts']
         content = self._parse_json(post['post_contents_json'], video_id)[0]
-        uploader = post.get('user_name')
-        guests_json = traverse_obj(content, ('guests_json', {json.loads}, {dict})) or {}

         return {
             'id': video_id,
-            'title': post.get('post_title') or self._og_search_title(webpage),
             'display_id': display_id,
-            'url': content.get('url'),
-            'timestamp': parse_iso8601(content.get('start_date') or content.get('crdate'), ' '),
-            'uploader': uploader,
-            'channel_id': str_or_none(post.get('user_id')),
-            'duration': float_or_none(content.get('duration')),
-            'cast': list(filter(None, [uploader, *traverse_obj(guests_json, (..., 'name'))])),
-            'description': post.get('post_body'),
-            'categories': list(filter(None, [content.get('category')])),
-            'tags': try_call(lambda: list(filter('', post['post_tags'].split(',')))),
-            'subtitles': {
-                'live_chat': [{
-                    'url': f'https://prankcast.com/api/private/chat/select-broadcast?id={post["content_id"]}&cache=',
-                    'ext': 'json',
-                }],
-            } if post.get('content_id') else None,
+            'title': self._og_search_title(webpage),
+            **traverse_obj(post, {
+                'title': ('post_title', {str}),
+                'description': ('post_body', {str}),
+                'tags': ('post_tags', {lambda x: x.split(',')}, ..., {str.strip}, filter),
+                'channel_id': ('user_id', {int}, {str_or_none}),
+                'uploader': ('user_name', {str}),
+            }),
+            **traverse_obj(content, {
+                'url': (('secure_url', 'url'), {url_or_none}, any),
+                'timestamp': ((
+                    (('start_date', 'crdate'), {parse_iso8601(delimiter=' ')}),
+                    ('created_at', {parse_iso8601}),
+                ), any),
+                'duration': ('duration', {float_or_none}),
+                'categories': ('category', {str}, filter, all, filter),
+                'cast': ((
+                    {value(post.get('user_name'))},
+                    ('guests_json', {json.loads}, ..., 'name'),
+                ), {str}, filter),
+            }),
         }

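Note: for readers unfamiliar with the declarative style the reworked extractor uses above, here is a small runnable sketch against yt-dlp's own traversal helper; the sample `post` dict is invented, and the behavior assumed is that of current yt-dlp releases:

from yt_dlp.utils import str_or_none
from yt_dlp.utils.traversal import traverse_obj

post = {'post_title': 'Demo', 'user_id': 136, 'post_tags': 'prank, radio, '}
meta = traverse_obj(post, {
    'title': ('post_title', {str}),                   # a set with a type acts as a type filter
    'channel_id': ('user_id', {int}, {str_or_none}),  # filter to int, then stringify
    'tags': ('post_tags', {lambda x: x.split(',')}, ..., {str.strip}, filter),
})
print(meta)  # {'title': 'Demo', 'channel_id': '136', 'tags': ['prank', 'radio']}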
View File

@@ -248,35 +248,17 @@ class SlidesLiveIE(InfoExtractor):
             'skip_download': 'm3u8',
         },
     }, {
-        # /v3/ slides, .jpg and .png, service_name = youtube
+        # /v3/ slides, .jpg and .png, formerly service_name = youtube, now native
         'url': 'https://slideslive.com/embed/38932460/',
         'info_dict': {
-            'id': 'RTPdrgkyTiE',
-            'display_id': '38932460',
+            'id': '38932460',
             'ext': 'mp4',
             'title': 'Active Learning for Hierarchical Multi-Label Classification',
-            'description': 'Watch full version of this video at https://slideslive.com/38932460.',
-            'channel': 'SlidesLive Videos - A',
-            'channel_id': 'UC62SdArr41t_-_fX40QCLRw',
-            'channel_url': 'https://www.youtube.com/channel/UC62SdArr41t_-_fX40QCLRw',
-            'uploader': 'SlidesLive Videos - A',
-            'uploader_id': '@slideslivevideos-a6075',
-            'uploader_url': 'https://www.youtube.com/@slideslivevideos-a6075',
-            'upload_date': '20200903',
-            'timestamp': 1697805922,
-            'duration': 942,
-            'age_limit': 0,
-            'live_status': 'not_live',
-            'playable_in_embed': True,
-            'availability': 'unlisted',
-            'categories': ['People & Blogs'],
-            'tags': [],
-            'channel_follower_count': int,
-            'like_count': int,
-            'view_count': int,
-            'thumbnail': r're:^https?://.*\.(?:jpg|png|webp)',
-            'thumbnails': 'count:21',
+            'duration': 941,
+            'thumbnail': r're:https?://.+/.+\.(?:jpg|png)',
             'chapters': 'count:20',
+            'timestamp': 1708338974,
+            'upload_date': '20240219',
         },
         'params': {
             'skip_download': 'm3u8',
@@ -425,7 +407,7 @@ class SlidesLiveIE(InfoExtractor):
         player_token = self._search_regex(r'data-player-token="([^"]+)"', webpage, 'player token')
         player_data = self._download_webpage(
-            f'https://ben.slideslive.com/player/{video_id}', video_id,
+            f'https://slideslive.com/player/{video_id}', video_id,
             note='Downloading player info', query={'player_token': player_token})
         player_info = self._extract_custom_m3u8_info(player_data)
@@ -525,7 +507,7 @@ class SlidesLiveIE(InfoExtractor):
         yield info
         service_data = self._download_json(
-            f'https://ben.slideslive.com/player/{video_id}/slides_video_service_data',
+            f'https://slideslive.com/player/{video_id}/slides_video_service_data',
             video_id, fatal=False, query={
                 'player_token': player_token,
                 'videos': ','.join(video_slides),

View File

@@ -438,7 +438,7 @@ class SoundcloudIE(SoundcloudBaseIE):
                             (?P<title>[\w\d-]+)
                             (?:/(?P<token>(?!(?:albums|sets|recommended))[^?]+?))?
                             (?:[?].*)?$)
-                       |(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+)
+                       |(?:api(?:-v2)?\.soundcloud\.com/tracks/(?:soundcloud%3Atracks%3A)?(?P<track_id>\d+)
                             (?:/?\?secret_token=(?P<secret_token>[^&]+))?)
                     )
                     '''
@@ -692,6 +692,9 @@ class SoundcloudIE(SoundcloudBaseIE):
         # Go+ (account with active subscription needed)
         'url': 'https://soundcloud.com/taylorswiftofficial/look-what-you-made-me-do',
         'only_matching': True,
+    }, {
+        'url': 'https://api.soundcloud.com/tracks/soundcloud%3Atracks%3A1083788353',
+        'only_matching': True,
     }]
     def _real_extract(self, url):

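Note: the only functional change to the regex above is the optional `(?:soundcloud%3Atracks%3A)?` group. `%3A` is the percent-encoded colon, so the pattern now also accepts API URLs that embed the full `soundcloud:tracks:` URN before the numeric ID. A quick check (sample URLs adapted from the new test):

import re
import urllib.parse

pattern = r'api(?:-v2)?\.soundcloud\.com/tracks/(?:soundcloud%3Atracks%3A)?(?P<track_id>\d+)'
for url in ('https://api.soundcloud.com/tracks/1083788353',
            'https://api.soundcloud.com/tracks/soundcloud%3Atracks%3A1083788353'):
    print(re.search(pattern, url)['track_id'])  # 1083788353 both times
print(urllib.parse.unquote('soundcloud%3Atracks%3A'))  # soundcloud:tracks: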
View File

@@ -1,12 +1,20 @@
+import base64
+import datetime as dt
 import itertools
+import json
+import re
+import time
 from .common import InfoExtractor
-from ..networking import HEADRequest
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
+    encode_data_uri,
+    filter_dict,
     int_or_none,
-    update_url_query,
+    jwt_decode_hs256,
     url_or_none,
+    urlencode_postdata,
     urljoin,
 )
 from ..utils.traversal import traverse_obj
@@ -90,7 +98,7 @@ class TenPlayIE(InfoExtractor):
         'only_matching': True,
     }]
     _GEO_BYPASS = False
+    _GEO_COUNTRIES = ['AU']
     _AUS_AGES = {
         'G': 0,
         'PG': 15,
@@ -100,31 +108,155 @@ class TenPlayIE(InfoExtractor):
         'R': 18,
         'X': 18,
     }
+    _TOKEN_CACHE_KEY = 'token_data'
+    _SEGMENT_BITRATE_RE = r'(?m)-(?:300|150|75|55)0000-(\d+(?:-[\da-f]+)?)\.ts$'
+    _refresh_token = None
+    _access_token = None
+
+    @staticmethod
+    def _filter_ads_from_m3u8(m3u8_doc):
+        out = []
+        for line in m3u8_doc.splitlines():
+            if line.startswith('https://redirector.googlevideo.com/'):
+                out.pop()
+                continue
+            out.append(line)
+        return '\n'.join(out)
+
+    @staticmethod
+    def _generate_xnetwork_ten_auth_token():
+        ts = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%d%H%M%S')
+        return base64.b64encode(ts.encode()).decode()
+
+    @staticmethod
+    def _is_jwt_expired(token):
+        return jwt_decode_hs256(token)['exp'] - time.time() < 300
+
+    def _refresh_access_token(self):
+        try:
+            refresh_data = self._download_json(
+                'https://10.com.au/api/token/refresh', None, 'Refreshing access token',
+                headers={
+                    'Content-Type': 'application/json',
+                }, data=json.dumps({
+                    'accessToken': self._access_token,
+                    'refreshToken': self._refresh_token,
+                }).encode())
+        except ExtractorError as e:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
+                self._refresh_token = self._access_token = None
+                self.cache.store(self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, [None, None])
+                self.report_warning('Refresh token has been invalidated; retrying with credentials')
+                self._perform_login(*self._get_login_info())
+                return
+            raise
+        self._access_token = refresh_data['accessToken']
+        self._refresh_token = refresh_data['refreshToken']
+        self.cache.store(self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, [self._refresh_token, self._access_token])
+
+    def _perform_login(self, username, password):
+        if not self._refresh_token:
+            self._refresh_token, self._access_token = self.cache.load(
+                self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, default=[None, None])
+            if self._refresh_token and self._access_token:
+                self.write_debug('Using cached refresh token')
+                return
+        try:
+            auth_data = self._download_json(
+                'https://10.com.au/api/user/auth', None, 'Logging in',
+                headers={
+                    'Content-Type': 'application/json',
+                    'X-Network-Ten-Auth': self._generate_xnetwork_ten_auth_token(),
+                    'Referer': 'https://10.com.au/',
+                }, data=json.dumps({
+                    'email': username,
+                    'password': password,
+                }).encode())
+        except ExtractorError as e:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
+                raise ExtractorError('Invalid username/password', expected=True)
+            raise
+        self._refresh_token = auth_data['jwt']['refreshToken']
+        self._access_token = auth_data['jwt']['accessToken']
+        self.cache.store(self._NETRC_MACHINE, self._TOKEN_CACHE_KEY, [self._refresh_token, self._access_token])
+
+    def _call_playback_api(self, content_id):
+        if self._access_token and self._is_jwt_expired(self._access_token):
+            self._refresh_access_token()
+        for is_retry in (False, True):
+            try:
+                return self._download_json_handle(
+                    f'https://10.com.au/api/v1/videos/playback/{content_id}/', content_id,
+                    note='Downloading video JSON', query={'platform': 'samsung'},
+                    headers=filter_dict({
+                        'TP-AcceptFeature': 'v1/fw;v1/drm',
+                        'Authorization': f'Bearer {self._access_token}' if self._access_token else None,
+                    }))
+            except ExtractorError as e:
+                if not is_retry and isinstance(e.cause, HTTPError) and e.cause.status == 403:
+                    if self._access_token:
+                        self.to_screen('Access token has expired; refreshing')
+                        self._refresh_access_token()
+                        continue
+                    elif not self._get_login_info()[0]:
+                        self.raise_login_required('Login required to access this video', method='password')
+                raise

     def _real_extract(self, url):
         content_id = self._match_id(url)
-        data = self._download_json(
-            'https://10.com.au/api/v1/videos/' + content_id, content_id)
+        try:
+            data = self._download_json(f'https://10.com.au/api/v1/videos/{content_id}', content_id)
+        except ExtractorError as e:
+            if (
+                isinstance(e.cause, HTTPError) and e.cause.status == 403
+                and 'Error 54113' in e.cause.response.read().decode()
+            ):
+                self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
+            raise
-        video_data = self._download_json(
-            f'https://vod.ten.com.au/api/videos/bcquery?command=find_videos_by_id&video_id={data["altId"]}',
-            content_id, 'Downloading video JSON')
-        # Dash URL 404s, changing the m3u8 format works
-        m3u8_url = self._request_webpage(
-            HEADRequest(update_url_query(video_data['items'][0]['dashManifestUrl'], {
-                'manifest': 'm3u',
-            })),
-            content_id, 'Checking stream URL').url
-        if '10play-not-in-oz' in m3u8_url:
-            self.raise_geo_restricted(countries=['AU'])
-        if '10play_unsupported' in m3u8_url:
-            raise ExtractorError('Unable to extract stream')
-        # Attempt to get a higher quality stream
-        formats = self._extract_m3u8_formats(
-            m3u8_url.replace(',150,75,55,0000', ',500,300,150,75,55,0000'),
-            content_id, 'mp4', fatal=False)
-        if not formats:
-            formats = self._extract_m3u8_formats(m3u8_url, content_id, 'mp4')
+        video_data, urlh = self._call_playback_api(content_id)
+        content_source_id = video_data['dai']['contentSourceId']
+        video_id = video_data['dai']['videoId']
+        auth_token = urlh.get_header('x-dai-auth')
+        if not auth_token:
+            raise ExtractorError('Failed to get DAI auth token')
+        dai_data = self._download_json(
+            f'https://pubads.g.doubleclick.net/ondemand/hls/content/{content_source_id}/vid/{video_id}/streams',
+            content_id, note='Downloading DAI JSON',
+            data=urlencode_postdata({'auth-token': auth_token}))
+        # Ignore subs to avoid ad break cleanup
+        formats, _ = self._extract_m3u8_formats_and_subtitles(
+            dai_data['stream_manifest'], content_id, 'mp4')
+        already_have_1080p = False
+        for fmt in formats:
+            m3u8_doc = self._download_webpage(
+                fmt['url'], content_id, note='Downloading m3u8 information')
+            m3u8_doc = self._filter_ads_from_m3u8(m3u8_doc)
+            fmt['hls_media_playlist_data'] = m3u8_doc
+            if fmt.get('height') == 1080:
+                already_have_1080p = True
+        # Attempt format upgrade
+        if not already_have_1080p and m3u8_doc and re.search(self._SEGMENT_BITRATE_RE, m3u8_doc):
+            m3u8_doc = re.sub(self._SEGMENT_BITRATE_RE, r'-5000000-\1.ts', m3u8_doc)
+            m3u8_doc = re.sub(r'-(?:300|150|75|55)0000\.key"', r'-5000000.key"', m3u8_doc)
+            formats.append({
+                'format_id': 'upgrade-attempt-1080p',
+                'url': encode_data_uri(m3u8_doc.encode(), 'application/x-mpegurl'),
+                'hls_media_playlist_data': m3u8_doc,
+                'width': 1920,
+                'height': 1080,
+                'ext': 'mp4',
+                'protocol': 'm3u8_native',
+                '__needs_testing': True,
+            })
         return {
             'id': content_id,

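Note: `_is_jwt_expired` above leans on `jwt_decode_hs256`, which only base64-decodes the payload segment; no signature check is needed just to read the `exp` claim. A standalone sketch of the same idea with a hand-built token (the 300-second leeway mirrors the extractor):

import base64
import json
import time

def is_jwt_expired(token, leeway=300):
    payload_b64 = token.split('.')[1]
    payload_b64 += '=' * (-len(payload_b64) % 4)  # restore stripped base64 padding
    payload = json.loads(base64.urlsafe_b64decode(payload_b64))
    return payload['exp'] - time.time() < leeway

header = base64.urlsafe_b64encode(b'{"alg":"HS256"}').rstrip(b'=').decode()
claims = base64.urlsafe_b64encode(
    json.dumps({'exp': int(time.time()) + 60}).encode()).rstrip(b'=').decode()
print(is_jwt_expired(f'{header}.{claims}.signature'))  # True: under 5 minutes of life left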
View File

@@ -81,7 +81,7 @@ class TikTokBaseIE(InfoExtractor):
         }
         self._APP_INFO_POOL = [
             {**defaults, **dict(
-                (k, v) for k, v in zip(self._APP_INFO_DEFAULTS, app_info.split('/')) if v
+                (k, v) for k, v in zip(self._APP_INFO_DEFAULTS, app_info.split('/'), strict=False) if v
             )} for app_info in self._KNOWN_APP_INFO
         ]
@@ -220,7 +220,7 @@ class TikTokBaseIE(InfoExtractor):
     def _extract_web_data_and_status(self, url, video_id, fatal=True):
         video_data, status = {}, -1
-        res = self._download_webpage_handle(url, video_id, fatal=fatal, headers={'User-Agent': 'Mozilla/5.0'})
+        res = self._download_webpage_handle(url, video_id, fatal=fatal, impersonate=True)
         if res is False:
             return video_data, status
@@ -1071,12 +1071,15 @@ class TikTokUserIE(TikTokBaseIE):
         webpage = self._download_webpage(
             self._UPLOADER_URL_FORMAT % user_name, user_name,
             'Downloading user webpage', 'Unable to download user webpage',
-            fatal=False, headers={'User-Agent': 'Mozilla/5.0'}) or ''
+            fatal=False, impersonate=True) or ''
         detail = traverse_obj(
             self._get_universal_data(webpage, user_name), ('webapp.user-detail', {dict})) or {}
-        if detail.get('statusCode') == 10222:
+        video_count = traverse_obj(detail, ('userInfo', ('stats', 'statsV2'), 'videoCount', {int}, any))
+        if not video_count and detail.get('statusCode') == 10222:
             self.raise_login_required(
                 'This user\'s account is private. Log into an account that has access')
+        elif video_count == 0:
+            raise ExtractorError('This account does not have any videos posted', expected=True)
         sec_uid = traverse_obj(detail, ('userInfo', 'user', 'secUid', {str}))
         if sec_uid:
             fail_early = not traverse_obj(detail, ('userInfo', 'itemList', ...))
@@ -1520,7 +1523,7 @@ class TikTokLiveIE(TikTokBaseIE):
         uploader, room_id = self._match_valid_url(url).group('uploader', 'id')
         if not room_id:
             webpage = self._download_webpage(
-                format_field(uploader, None, self._UPLOADER_URL_FORMAT), uploader)
+                format_field(uploader, None, self._UPLOADER_URL_FORMAT), uploader, impersonate=True)
             room_id = traverse_obj(
                 self._get_universal_data(webpage, uploader),
                 ('webapp.user-detail', 'userInfo', 'user', 'roomId', {str}))

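Note: the new `video_count` lookup uses a nested tuple to branch over both stats layouts TikTok serves. Sketch with an invented payload, again assuming current yt-dlp traversal semantics:

from yt_dlp.utils.traversal import traverse_obj

detail = {'userInfo': {'statsV2': {'videoCount': 42}}}
video_count = traverse_obj(
    detail, ('userInfo', ('stats', 'statsV2'), 'videoCount', {int}, any))
print(video_count)  # 42 -- `any` keeps the first branch that yields a value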
View File

@@ -1,46 +1,82 @@
+import re
 from .common import InfoExtractor
 from ..utils import (
     clean_html,
-    get_element_by_class,
+    extract_attributes,
     js_to_json,
+    mimetype2ext,
+    unified_strdate,
+    url_or_none,
+    urljoin,
 )
+from ..utils.traversal import find_element, traverse_obj

 class TVNoeIE(InfoExtractor):
-    _WORKING = False
-    _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/video/(?P<id>[0-9]+)'
-    _TEST = {
-        'url': 'http://www.tvnoe.cz/video/10362',
-        'md5': 'aee983f279aab96ec45ab6e2abb3c2ca',
+    IE_NAME = 'tvnoe'
+    IE_DESC = 'Televize Noe'
+    _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/porad/(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://www.tvnoe.cz/porad/43216-outdoor-films-s-mudr-tomasem-kempnym-pomahat-potrebnym-nejen-u-nas',
         'info_dict': {
-            'id': '10362',
+            'id': '43216-outdoor-films-s-mudr-tomasem-kempnym-pomahat-potrebnym-nejen-u-nas',
             'ext': 'mp4',
-            'series': 'Noční univerzita',
-            'title': 'prof. Tomáš Halík, Th.D. - Návrat náboženství a střet civilizací',
-            'description': 'md5:f337bae384e1a531a52c55ebc50fff41',
+            'title': 'Pomáhat potřebným nejen u nás',
+            'description': 'md5:78b538ee32f7e881ec23b9c278a0ff3a',
+            'release_date': '20250531',
+            'series': 'Outdoor Films s MUDr. Tomášem Kempným',
+            'thumbnail': r're:https?://www\.tvnoe\.cz/.+\.jpg',
         },
-    }
+    }, {
+        'url': 'https://www.tvnoe.cz/porad/43205-zamysleni-tomase-halika-7-nedele-velikonocni',
+        'info_dict': {
+            'id': '43205-zamysleni-tomase-halika-7-nedele-velikonocni',
+            'ext': 'mp4',
+            'title': '7. neděle velikonoční',
+            'description': 'md5:6bb9908efc59abe60e1c8c7c0e9bb6cd',
+            'release_date': '20250531',
+            'series': 'Zamyšlení Tomáše Halíka',
+            'thumbnail': r're:https?://www\.tvnoe\.cz/.+\.jpg',
+        },
+    }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+        player = self._search_json(
+            r'var\s+INIT_PLAYER\s*=', webpage, 'init player',
+            video_id, transform_source=js_to_json)

-        iframe_url = self._search_regex(
-            r'<iframe[^>]+src="([^"]+)"', webpage, 'iframe URL')
+        formats = []
+        for source in traverse_obj(player, ('tracks', ..., lambda _, v: url_or_none(v['src']))):
+            src_url = source['src']
+            ext = mimetype2ext(source.get('type'))
+            if ext == 'm3u8':
+                fmts = self._extract_m3u8_formats(
+                    src_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
+            elif ext == 'mpd':
+                fmts = self._extract_mpd_formats(
+                    src_url, video_id, mpd_id='dash', fatal=False)
+            else:
+                self.report_warning(f'Unsupported stream type: {ext}')
+                continue
+            formats.extend(fmts)

-        ifs_page = self._download_webpage(iframe_url, video_id)
-        jwplayer_data = self._find_jwplayer_data(
-            ifs_page, video_id, transform_source=js_to_json)
-        info_dict = self._parse_jwplayer_data(
-            jwplayer_data, video_id, require_title=False, base_url=iframe_url)
-        info_dict.update({
+        return {
             'id': video_id,
-            'title': clean_html(get_element_by_class(
-                'field-name-field-podnazev', webpage)),
-            'description': clean_html(get_element_by_class(
-                'field-name-body', webpage)),
-            'series': clean_html(get_element_by_class('title', webpage)),
-        })
-        return info_dict
+            'description': clean_html(self._search_regex(
+                r'<p\s+class="">(.+?)</p>', webpage, 'description', default=None)),
+            'formats': formats,
+            **traverse_obj(webpage, {
+                'title': ({find_element(tag='h2')}, {clean_html}),
+                'release_date': (
+                    {clean_html}, {re.compile(r'Premiéra:\s*(\d{1,2}\.\d{1,2}\.\d{4})').findall},
+                    ..., {str}, {unified_strdate}, any),
+                'series': ({find_element(tag='h1')}, {clean_html}),
+                'thumbnail': (
+                    {find_element(id='player-live', html=True)}, {extract_attributes},
+                    'poster', {urljoin('https://www.tvnoe.cz/')}),
+            }),
+        }

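Note: the format loop above dispatches on each track's MIME type via `mimetype2ext`. Minimal illustration with an invented track list:

from yt_dlp.utils import mimetype2ext

tracks = [{'src': 'https://example.com/a.m3u8', 'type': 'application/x-mpegurl'},
          {'src': 'https://example.com/a.mpd', 'type': 'application/dash+xml'}]
for track in tracks:
    print(mimetype2ext(track.get('type')))  # m3u8, then mpd -> HLS vs DASH path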
View File

@@ -514,7 +514,10 @@ class TwitchVodIE(TwitchBaseIE):
         is_live = None
         if thumbnail:
             if re.findall(r'/404_processing_[^.?#]+\.png', thumbnail):
-                is_live, thumbnail = True, None
+                # False positive for is_live if info.get('broadcastType') == 'HIGHLIGHT'
+                # See https://github.com/yt-dlp/yt-dlp/issues/14455
+                is_live = info.get('broadcastType') == 'ARCHIVE'
+                thumbnail = None
             else:
                 is_live = False

View File

@@ -58,6 +58,20 @@ class VidyardBaseIE(InfoExtractor):
         return subs

+    def _get_additional_metadata(self, video_id):
+        additional_metadata = self._download_json(
+            f'https://play.vidyard.com/video/{video_id}', video_id,
+            note='Downloading additional metadata', fatal=False)
+        return traverse_obj(additional_metadata, {
+            'title': ('name', {str}),
+            'duration': ('seconds', {int_or_none}),
+            'thumbnails': ('thumbnailUrl', {'url': {url_or_none}}, all),
+            'chapters': ('videoSections', lambda _, v: float_or_none(v['milliseconds']) is not None, {
+                'title': ('title', {str}),
+                'start_time': ('milliseconds', {float_or_none(scale=1000)}),
+            }),
+        })
+
     def _fetch_video_json(self, video_id):
         return self._download_json(
             f'https://play.vidyard.com/player/{video_id}.json', video_id)['payload']
@@ -67,6 +81,7 @@ class VidyardBaseIE(InfoExtractor):
         self._merge_subtitles(self._get_direct_subtitles(json_data.get('captions')), target=subtitles)
         return {
+            **self._get_additional_metadata(json_data['facadeUuid']),
             **traverse_obj(json_data, {
                 'id': ('facadeUuid', {str}),
                 'display_id': ('videoId', {int}, {str_or_none}),
@@ -113,6 +128,29 @@ class VidyardIE(VidyardBaseIE):
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/spacer.gif',
             'duration': 41.186,
         },
+    }, {
+        'url': 'https://share.vidyard.com/watch/wL237MtNgZUHo6e8WPiJbF',
+        'info_dict': {
+            'id': 'wL237MtNgZUHo6e8WPiJbF',
+            'display_id': '25926870',
+            'ext': 'mp4',
+            'title': 'Adding & Editing Video Chapters',
+            'thumbnail': 'https://cdn.vidyard.com/thumbnails/25926870/bvSEZS3dGY7DByQ_bzB57avIZ_hsvhr4_small.jpg',
+            'duration': 135.46,
+            'chapters': [{
+                'title': 'Adding new chapters',
+                'start_time': 0,
+            }, {
+                'title': 'Previewing your video',
+                'start_time': 74,
+            }, {
+                'title': 'Editing your chapters',
+                'start_time': 91,
+            }, {
+                'title': 'Share a link to a specific chapter',
+                'start_time': 105,
+            }],
+        },
     }, {
         'url': 'https://embed.vidyard.com/share/oTDMPlUv--51Th455G5u7Q',
         'info_dict': {
@@ -132,8 +170,8 @@ class VidyardIE(VidyardBaseIE):
             'id': 'SyStyHtYujcBHe5PkZc5DL',
             'display_id': '41974005',
             'ext': 'mp4',
-            'title': 'Prepare the Frame and Track for Palm Beach Polysatin Shutters With BiFold Track',
-            'description': r're:In this video, you will learn how to prepare the frame.+',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 1 of 6)',
+            'description': r're:In this video, you will learn the first step.+',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/41974005/IJw7oCaJcF1h7WWu3OVZ8A_small.png',
             'duration': 258.666,
         },
@@ -147,42 +185,42 @@ class VidyardIE(VidyardBaseIE):
             'id': 'SyStyHtYujcBHe5PkZc5DL',
             'display_id': '41974005',
             'ext': 'mp4',
-            'title': 'Prepare the Frame and Track for Palm Beach Polysatin Shutters With BiFold Track',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 1 of 6)',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/41974005/IJw7oCaJcF1h7WWu3OVZ8A_small.png',
             'duration': 258.666,
         }, {
             'id': '1Fw4B84jZTXLXWqkE71RiM',
             'display_id': '5861113',
             'ext': 'mp4',
-            'title': 'Palm Beach - Bi-Fold Track System "Frame Installation"',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 2 of 6)',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/5861113/29CJ54s5g1_aP38zkKLHew_small.jpg',
             'duration': 167.858,
         }, {
             'id': 'DqP3wBvLXSpxrcqpT5kEeo',
             'display_id': '41976334',
             'ext': 'mp4',
-            'title': 'Install the Track for Palm Beach Polysatin Shutters With BiFold Track',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 3 of 6)',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/5861090/RwG2VaTylUa6KhSTED1r1Q_small.png',
             'duration': 94.229,
         }, {
             'id': 'opfybfxpzQArxqtQYB6oBU',
             'display_id': '41976364',
             'ext': 'mp4',
-            'title': 'Install the Panel for Palm Beach Polysatin Shutters With BiFold Track',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 4 of 6)',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/5860926/JIOaJR08dM4QgXi_iQ2zGA_small.png',
             'duration': 191.467,
         }, {
             'id': 'rWrXvkbTNNaNqD6189HJya',
             'display_id': '41976382',
             'ext': 'mp4',
-            'title': 'Adjust the Panels for Palm Beach Polysatin Shutters With BiFold Track',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 5 of 6)',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/5860687/CwHxBv4UudAhOh43FVB4tw_small.png',
             'duration': 138.155,
         }, {
             'id': 'eYPTB521MZ9TPEArSethQ5',
             'display_id': '41976409',
             'ext': 'mp4',
-            'title': 'Assemble and Install the Valance for Palm Beach Polysatin Shutters With BiFold Track',
+            'title': 'Install Palm Beach Shutters with a Bi-Fold Track System (Video 6 of 6)',
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/5861425/0y68qlMU4O5VKU7bJ8i_AA_small.png',
             'duration': 148.224,
         }],
@@ -191,6 +229,7 @@ class VidyardIE(VidyardBaseIE):
     }, {
         # Non hubs.vidyard.com playlist
         'url': 'https://salesforce.vidyard.com/watch/d4vqPjs7Q5EzVEis5QT3jd',
+        'skip': 'URL now 404s. Alternative non hubs.vidyard.com playlist not yet available',
         'info_dict': {
             'id': 'd4vqPjs7Q5EzVEis5QT3jd',
             'title': 'How To: Service Cloud: Import External Content in Lightning Knowledge',
@@ -300,6 +339,7 @@ class VidyardIE(VidyardBaseIE):
     }, {
         # <script ... id="vidyard_embed_code_DXx2sW4WaLA6hTdGFz7ja8" src="//play.vidyard.com/DXx2sW4WaLA6hTdGFz7ja8.js?
         'url': 'http://videos.vivint.com/watch/DXx2sW4WaLA6hTdGFz7ja8',
+        'skip': 'URL certificate expired 2025-09-10. Alternative script embed test case not yet available',
         'info_dict': {
             'id': 'DXx2sW4WaLA6hTdGFz7ja8',
             'display_id': '2746529',
@@ -317,11 +357,12 @@ class VidyardIE(VidyardBaseIE):
             'ext': 'mp4',
             'title': 'Lesson 1 - Opening an MT4 Account',
             'description': 'Never heard of MetaTrader4? Here\'s the 411 on the popular trading platform!',
-            'duration': 168,
+            'duration': 168.16,
             'thumbnail': 'https://cdn.vidyard.com/thumbnails/20291/IM-G2WXQR9VBLl2Cmzvftg_small.jpg',
         },
     }, {
         # <iframe ... src="//play.vidyard.com/d61w8EQoZv1LDuPxDkQP2Q/type/background?preview=1"
+        'skip': 'URL changed embed method to \'class="vidyard-player-embed"\'. An alternative iframe embed test case is not yet available',
         'url': 'https://www.avaya.com/en/',
         'info_dict': {
             # These values come from the generic extractor and don't matter
@@ -354,46 +395,18 @@ class VidyardIE(VidyardBaseIE):
         }],
         'playlist_count': 2,
     }, {
-        # <div class="vidyard-player-embed" data-uuid="vpCWTVHw3qrciLtVY94YkS"
-        'url': 'https://www.gogoair.com/',
+        # <div class="vidyard-player-embed" data-uuid="pMk8eNCYzukzJaEPoo1Hgn"
+        # URL previously used iframe embeds and was used for that test case
+        'url': 'https://www.avaya.com/en/',
         'info_dict': {
-            # These values come from the generic extractor and don't matter
-            'id': str,
-            'title': str,
-            'description': str,
-            'age_limit': 0,
+            'id': 'pMk8eNCYzukzJaEPoo1Hgn',
+            'display_id': '47074153',
+            'ext': 'mp4',
+            'title': 'Avaya Infinity Helps Redefine the Contact Center as Your Connection Center',
+            'description': r're:Our mission is to help you turn single engagements.+',
+            'duration': 81.55,
+            'thumbnail': 'https://cdn.vidyard.com/thumbnails/47074153/MZOLKhXdbiUWwp2ROnT5HaXL0oau6JtR_small.jpg',
         },
-        'playlist': [{
-            'info_dict': {
-                'id': 'vpCWTVHw3qrciLtVY94YkS',
-                'display_id': '40780699',
-                'ext': 'mp4',
-                'title': 'Upgrade to AVANCE 100% worth it - Jason Talley, Owner and Pilot, Testimonial',
-                'description': 'md5:f609824839439a51990cef55ffc472aa',
-                'duration': 70.737,
-                'thumbnail': 'https://cdn.vidyard.com/thumbnails/40780699/KzjfYZz5MZl2gHF_e-4i2c6ib1cLDweQ_small.jpg',
-            },
-        }, {
-            'info_dict': {
-                'id': 'xAmV9AsLbnitCw35paLBD8',
-                'display_id': '31130867',
-                'ext': 'mp4',
-                'title': 'Brad Keselowski goes faster with Gogo AVANCE inflight Wi-Fi',
-                'duration': 132.565,
-                'thumbnail': 'https://cdn.vidyard.com/thumbnails/31130867/HknyDtLdm2Eih9JZ4A5XLjhfBX_6HRw5_small.jpg',
-            },
-        }, {
-            'info_dict': {
-                'id': 'RkkrFRNxfP79nwCQavecpF',
-                'display_id': '39009815',
-                'ext': 'mp4',
-                'title': 'Live Demo of Gogo Galileo',
-                'description': 'md5:e2df497236f4e12c3fef8b392b5f23e0',
-                'duration': 112.128,
-                'thumbnail': 'https://cdn.vidyard.com/thumbnails/38144873/CWLlxfUbJ4Gh0ThbUum89IsEM4yupzMb_small.jpg',
-            },
-        }],
-        'playlist_count': 3,
     }]

     @classmethod

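Note: the chapter extraction above relies on Vidyard reporting section offsets in milliseconds, converted with the partially-applied `float_or_none(scale=1000)` into the seconds-based `start_time` yt-dlp expects. Sketch with an invented payload:

from yt_dlp.utils import float_or_none
from yt_dlp.utils.traversal import traverse_obj

metadata = {'videoSections': [
    {'title': 'Adding new chapters', 'milliseconds': 0},
    {'title': 'Previewing your video', 'milliseconds': 74000},
]}
print(traverse_obj(metadata, (
    'videoSections', lambda _, v: float_or_none(v['milliseconds']) is not None, {
        'title': ('title', {str}),
        'start_time': ('milliseconds', {float_or_none(scale=1000)}),
    })))  # start_time comes out as 0.0 and 74.0 seconds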
View File

@@ -2,6 +2,7 @@ import base64
 import codecs
 import itertools
 import re
+import string

 from .common import InfoExtractor
 from ..utils import (
@@ -22,6 +23,47 @@ from ..utils import (
 )

+def to_signed_32(n):
+    return n % ((-1 if n < 0 else 1) * 2**32)
+
+
+class _ByteGenerator:
+    def __init__(self, algo_id, seed):
+        try:
+            self._algorithm = getattr(self, f'_algo{algo_id}')
+        except AttributeError:
+            raise ExtractorError(f'Unknown algorithm ID: {algo_id}')
+        self._s = to_signed_32(seed)
+
+    def _algo1(self, s):
+        # LCG (a=1664525, c=1013904223, m=2^32)
+        # Ref: https://en.wikipedia.org/wiki/Linear_congruential_generator
+        s = self._s = to_signed_32(s * 1664525 + 1013904223)
+        return s
+
+    def _algo2(self, s):
+        # xorshift32
+        # Ref: https://en.wikipedia.org/wiki/Xorshift
+        s = to_signed_32(s ^ (s << 13))
+        s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 17))
+        s = self._s = to_signed_32(s ^ (s << 5))
+        return s
+
+    def _algo3(self, s):
+        # Weyl Sequence (k≈2^32*φ, m=2^32) + MurmurHash3 (fmix32)
+        # Ref: https://en.wikipedia.org/wiki/Weyl_sequence
+        #      https://commons.apache.org/proper/commons-codec/jacoco/org.apache.commons.codec.digest/MurmurHash3.java.html
+        s = self._s = to_signed_32(s + 0x9e3779b9)
+        s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 16))
+        s = to_signed_32(s * to_signed_32(0x85ebca77))
+        s = to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 13))
+        s = to_signed_32(s * to_signed_32(0xc2b2ae3d))
+        return to_signed_32(s ^ ((s & 0xFFFFFFFF) >> 16))
+
+    def __next__(self):
+        return self._algorithm(self._s) & 0xFF
+
+
 class XHamsterIE(InfoExtractor):
     _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.(?:com|desi)|xhday\.com|xhvid\.com)'
     _VALID_URL = rf'''(?x)
@@ -146,6 +188,12 @@ class XHamsterIE(InfoExtractor):
     _XOR_KEY = b'xh7999'

     def _decipher_format_url(self, format_url, format_id):
+        if all(char in string.hexdigits for char in format_url):
+            byte_data = bytes.fromhex(format_url)
+            seed = int.from_bytes(byte_data[1:5], byteorder='little', signed=True)
+            byte_gen = _ByteGenerator(byte_data[0], seed)
+            return bytearray(byte ^ next(byte_gen) for byte in byte_data[5:]).decode('latin-1')
+
         cipher_type, _, ciphertext = try_call(
             lambda: base64.b64decode(format_url).decode().partition('_')) or [None] * 3
@@ -164,6 +212,16 @@ class XHamsterIE(InfoExtractor):
             self.report_warning(f'Skipping format "{format_id}": unsupported cipher type "{cipher_type}"')
             return None

+    def _fixup_formats(self, formats):
+        for f in formats:
+            if f.get('vcodec'):
+                continue
+            for vcodec in ('av1', 'h264'):
+                if any(f'.{vcodec}.' in f_url for f_url in (f['url'], f.get('manifest_url', ''))):
+                    f['vcodec'] = vcodec
+                    break
+        return formats
+
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id') or mobj.group('id_2')
@@ -312,7 +370,8 @@ class XHamsterIE(InfoExtractor):
             'comment_count': int_or_none(video.get('comments')),
             'age_limit': age_limit if age_limit is not None else 18,
             'categories': categories,
-            'formats': formats,
+            'formats': self._fixup_formats(formats),
+            '_format_sort_fields': ('res', 'proto', 'tbr'),
         }

         # Old layout fallback

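Note: in the new hex cipher, byte 0 picks the PRNG, bytes 1-4 are a little-endian signed seed, and the remaining bytes are XORed with the low byte of each successive PRNG output. A standalone round-trip sketch using algorithm 2 (xorshift32), written with plain unsigned masking rather than the extractor's signed 32-bit JS-style arithmetic:

def xorshift32_stream(seed):
    s = seed & 0xFFFFFFFF
    while True:
        s ^= (s << 13) & 0xFFFFFFFF
        s ^= s >> 17
        s ^= (s << 5) & 0xFFFFFFFF
        yield s & 0xFF  # keystream: low byte of each state

def decipher(byte_data):
    seed = int.from_bytes(byte_data[1:5], byteorder='little', signed=True)
    gen = xorshift32_stream(seed)
    return bytes(b ^ next(gen) for b in byte_data[5:]).decode('latin-1')

plaintext = 'https://example.com/video.m3u8'
gen = xorshift32_stream(1234)
blob = bytes([2]) + (1234).to_bytes(4, 'little', signed=True) + bytes(
    b ^ next(gen) for b in plaintext.encode('latin-1'))
print(decipher(blob) == plaintext)  # True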
View File

@@ -99,7 +99,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'WEB',
-                'clientVersion': '2.20250312.04.00',
+                'clientVersion': '2.20250925.01.00',
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
@@ -111,7 +111,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'WEB',
-                'clientVersion': '2.20250312.04.00',
+                'clientVersion': '2.20250925.01.00',
                 'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15,gzip(gfe)',
             },
         },
@@ -123,7 +123,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'WEB_EMBEDDED_PLAYER',
-                'clientVersion': '1.20250310.01.00',
+                'clientVersion': '1.20250923.21.00',
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 56,
@@ -134,7 +134,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'WEB_REMIX',
-                'clientVersion': '1.20250310.01.00',
+                'clientVersion': '1.20250922.03.00',
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
@@ -163,7 +163,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'WEB_CREATOR',
-                'clientVersion': '1.20250312.03.01',
+                'clientVersion': '1.20250922.03.00',
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
@@ -220,16 +220,30 @@ INNERTUBE_CLIENTS = {
         },
         'PLAYER_PO_TOKEN_POLICY': PlayerPoTokenPolicy(required=False, recommended=True),
     },
+    # Doesn't require a PoToken for some reason
+    'android_sdkless': {
+        'INNERTUBE_CONTEXT': {
+            'client': {
+                'clientName': 'ANDROID',
+                'clientVersion': '20.10.38',
+                'userAgent': 'com.google.android.youtube/20.10.38 (Linux; U; Android 11) gzip',
+                'osName': 'Android',
+                'osVersion': '11',
+            },
+        },
+        'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
+        'REQUIRE_JS_PLAYER': False,
+    },
     # YouTube Kids videos aren't returned on this client for some reason
     'android_vr': {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'ANDROID_VR',
-                'clientVersion': '1.62.27',
+                'clientVersion': '1.65.10',
                 'deviceMake': 'Oculus',
                 'deviceModel': 'Quest 3',
                 'androidSdkVersion': 32,
-                'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.62.27 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
+                'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.65.10 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
                 'osName': 'Android',
                 'osVersion': '12L',
             },
@@ -274,7 +288,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'MWEB',
-                'clientVersion': '2.20250311.03.00',
+                'clientVersion': '2.20250925.01.00',
                 # mweb previously did not require PO Token with this UA
                 'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
             },
@@ -304,7 +318,7 @@ INNERTUBE_CLIENTS = {
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'TVHTML5',
-                'clientVersion': '7.20250312.16.00',
+                'clientVersion': '7.20250923.13.00',
                 'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version',
             },
         },
@@ -320,6 +334,20 @@ INNERTUBE_CLIENTS = {
             'clientVersion': '1.0',
         },
     },
+    'GVS_PO_TOKEN_POLICY': {
+        StreamingProtocol.HTTPS: GvsPoTokenPolicy(
+            required=True,
+            recommended=True,
+        ),
+        StreamingProtocol.DASH: GvsPoTokenPolicy(
+            required=True,
+            recommended=True,
+        ),
+        StreamingProtocol.HLS: GvsPoTokenPolicy(
+            required=False,
+            recommended=True,
+        ),
+    },
     'INNERTUBE_CONTEXT_CLIENT_NAME': 75,
 },
 # This client now requires sign-in for every video
@@ -1182,7 +1210,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         except ValueError:
             return None

-    def _parse_time_text(self, text):
+    def _parse_time_text(self, text, report_failure=True):
         if not text:
             return
         dt_ = self.extract_relative_time(text)
@@ -1197,7 +1225,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             (r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'),
             text.lower(), 'time text', default=None)))

-        if text and timestamp is None and self._preferred_lang in (None, 'en'):
+        if report_failure and text and timestamp is None and self._preferred_lang in (None, 'en'):
             self.report_warning(
                 f'Cannot parse localized time text "{text}"', only_once=True)
         return timestamp

View File

@@ -341,7 +341,11 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
                 'contentImage', *thumb_keys, 'thumbnailViewModel', 'image'), final_key='sources'),
             duration=traverse_obj(view_model, (
                 'contentImage', 'thumbnailViewModel', 'overlays', ..., 'thumbnailOverlayBadgeViewModel',
-                'thumbnailBadges', ..., 'thumbnailBadgeViewModel', 'text', {parse_duration}, any)))
+                'thumbnailBadges', ..., 'thumbnailBadgeViewModel', 'text', {parse_duration}, any)),
+            timestamp=(traverse_obj(view_model, (
+                'metadata', 'lockupMetadataViewModel', 'metadata', 'contentMetadataViewModel', 'metadataRows',
+                ..., 'metadataParts', ..., 'text', 'content', {lambda t: self._parse_time_text(t, report_failure=False)}, any))
+                if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE) else None))

     def _rich_entries(self, rich_grid_renderer):
         if lockup_view_model := traverse_obj(rich_grid_renderer, ('content', 'lockupViewModel', {dict})):

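Note: the new `timestamp=` branch feeds relative strings like '3 days ago' from the lockup metadata into `_parse_time_text`. A simplified standalone version of that conversion (the real helper also handles localized and absolute dates):

import datetime as dt
import re

def parse_relative_time(text):
    mobj = re.match(r'(\d+)\s+(second|minute|hour|day|week|month|year)s?\s+ago', text)
    if not mobj:
        return None
    unit_seconds = {'second': 1, 'minute': 60, 'hour': 3600, 'day': 86400,
                    'week': 604800, 'month': 2592000, 'year': 31536000}
    return int(dt.datetime.now(dt.timezone.utc).timestamp()) - int(mobj[1]) * unit_seconds[mobj[2]]

print(parse_relative_time('3 days ago'))  # approximate epoch: now minus 259200s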
View File

@@ -257,10 +257,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         '401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
     }
     _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'srt', 'vtt')
-    _DEFAULT_CLIENTS = ('tv_simply', 'tv', 'web')
+    _DEFAULT_CLIENTS = ('android_sdkless', 'tv', 'web_safari', 'web')
     _DEFAULT_AUTHED_CLIENTS = ('tv', 'web_safari', 'web')
     # Premium does not require POT (except for subtitles)
-    _DEFAULT_PREMIUM_CLIENTS = ('tv', 'web_creator', 'web')
+    _DEFAULT_PREMIUM_CLIENTS = ('tv', 'web_creator', 'web_safari', 'web')
     _GEO_BYPASS = False
@@ -1815,6 +1815,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         'params': {'skip_download': True},
     }]

+    _DEFAULT_PLAYER_JS_VERSION = 'actual'
+    _DEFAULT_PLAYER_JS_VARIANT = 'main'
     _PLAYER_JS_VARIANT_MAP = {
         'main': 'player_ias.vflset/en_US/base.js',
         'tcc': 'player_ias_tcc.vflset/en_US/base.js',
@@ -2016,7 +2018,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))

     def _get_player_js_version(self):
-        player_js_version = self._configuration_arg('player_js_version', [''])[0] or '20348@0004de42'
+        player_js_version = self._configuration_arg('player_js_version', [''])[0] or self._DEFAULT_PLAYER_JS_VERSION
         if player_js_version == 'actual':
             return None, None
         if not re.fullmatch(r'[0-9]{5,}@[0-9a-f]{8,}', player_js_version):
@@ -2026,31 +2028,64 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             return None, None
         return player_js_version.split('@')

+    def _construct_player_url(self, *, player_id=None, player_url=None):
+        assert player_id or player_url, '_construct_player_url must take one of player_id or player_url'
+        if not player_id:
+            player_id = self._extract_player_info(player_url)
+        force_player_id = False
+        player_id_override = self._get_player_js_version()[1]
+        if player_id_override and player_id_override != player_id:
+            force_player_id = f'Forcing player {player_id_override} in place of player {player_id}'
+            player_id = player_id_override
+        variant = self._configuration_arg('player_js_variant', [''])[0] or self._DEFAULT_PLAYER_JS_VARIANT
+        if variant not in (*self._PLAYER_JS_VARIANT_MAP, 'actual'):
+            self.report_warning(
+                f'Invalid player JS variant name "{variant}" requested. '
+                f'Valid choices are: {", ".join(self._PLAYER_JS_VARIANT_MAP)}', only_once=True)
+            variant = self._DEFAULT_PLAYER_JS_VARIANT
+        if not player_url:
+            if force_player_id:
+                self.write_debug(force_player_id, only_once=True)
+            if variant == 'actual':
+                # We don't have an actual variant so we always use 'main' & don't need to write debug
+                variant = 'main'
+            return urljoin('https://www.youtube.com', f'/s/player/{player_id}/{self._PLAYER_JS_VARIANT_MAP[variant]}')
+        actual_variant = self._get_player_id_variant_and_path(player_url)[1]
+        if not force_player_id and (variant == 'actual' or variant == actual_variant):
+            return urljoin('https://www.youtube.com', player_url)
+        if variant == 'actual':
+            if actual_variant:
+                variant = actual_variant
+            else:
+                # We need to force player_id but can't determine variant; fall back to 'main' variant
+                variant = 'main'
+        self.write_debug(join_nonempty(
+            force_player_id,
+            variant != actual_variant and f'Forcing "{variant}" player JS variant for player {player_id}',
+            f'original url = {player_url}',
+            delim='\n '), only_once=True)
+        return urljoin('https://www.youtube.com', f'/s/player/{player_id}/{self._PLAYER_JS_VARIANT_MAP[variant]}')
+
     def _extract_player_url(self, *ytcfgs, webpage=None):
         player_url = traverse_obj(
             ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
             get_all=False, expected_type=str)
         if not player_url:
             return
-        player_id_override = self._get_player_js_version()[1]
-        requested_js_variant = self._configuration_arg('player_js_variant', [''])[0] or 'main'
-        if requested_js_variant in self._PLAYER_JS_VARIANT_MAP:
-            player_id = player_id_override or self._extract_player_info(player_url)
-            original_url = player_url
-            player_url = f'/s/player/{player_id}/{self._PLAYER_JS_VARIANT_MAP[requested_js_variant]}'
-            if original_url != player_url:
-                self.write_debug(
-                    f'Forcing "{requested_js_variant}" player JS variant for player {player_id}\n'
-                    f' original url = {original_url}', only_once=True)
-        elif requested_js_variant != 'actual':
-            self.report_warning(
-                f'Invalid player JS variant name "{requested_js_variant}" requested. '
-                f'Valid choices are: {", ".join(self._PLAYER_JS_VARIANT_MAP)}', only_once=True)
-        return urljoin('https://www.youtube.com', player_url)
+        return self._construct_player_url(player_url=player_url)

     def _download_player_url(self, video_id, fatal=False):
+        if player_id_override := self._get_player_js_version()[1]:
+            self.write_debug(f'Forcing player {player_id_override}', only_once=True)
+            return self._construct_player_url(player_id=player_id_override)
         iframe_webpage = self._download_webpage_with_retries(
             'https://www.youtube.com/iframe_api',
             note='Downloading iframe API JS',
@@ -2060,9 +2095,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         player_version = self._search_regex(
             r'player\\?/([0-9a-fA-F]{8})\\?/', iframe_webpage, 'player version', fatal=fatal)
         if player_version:
-            return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
+            return self._construct_player_url(player_id=player_version)

-    def _player_js_cache_key(self, player_url):
+    def _get_player_id_variant_and_path(self, player_url):
         player_id = self._extract_player_info(player_url)
         player_path = remove_start(urllib.parse.urlparse(player_url).path, f'/s/player/{player_id}/')
         variant = self._INVERSE_PLAYER_JS_VARIANT_MAP.get(player_path) or next((
@@ -2072,8 +2107,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             self.write_debug(
                 f'Unable to determine player JS variant\n'
                 f' player = {player_url}', only_once=True)
+        return player_id, variant, player_path
+
+    def _player_js_cache_key(self, player_url):
+        player_id, variant, player_path = self._get_player_id_variant_and_path(player_url)
+        if not variant:
             variant = re.sub(r'[^a-zA-Z0-9]', '_', remove_end(player_path, '.js'))
-        return join_nonempty(player_id, variant)
+        return f'{player_id}-{variant}'

     def _signature_cache_id(self, example_sig):
         """ Return a string representation of a signature """
@@ -2915,12 +2955,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         # TODO(future): This validation should be moved into pot framework.
         # Some sort of middleware or validation provider perhaps?

+        gvs_bind_to_video_id = False
+        experiments = traverse_obj(ytcfg, (
+            'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'serializedExperimentFlags', {urllib.parse.parse_qs}))
+        if 'true' in traverse_obj(experiments, (..., 'html5_generate_content_po_token', -1)):
+            self.write_debug(
+                f'{video_id}: Detected experiment to bind GVS PO Token to video id.', only_once=True)
+            gvs_bind_to_video_id = True
+
         # GVS WebPO Token is bound to visitor_data / Visitor ID when logged out.
         # Must have visitor_data for it to function.
-        if player_url and context == _PoTokenContext.GVS and not visitor_data and not self.is_authenticated:
+        if (
+            player_url and context == _PoTokenContext.GVS
+            and not visitor_data and not self.is_authenticated and not gvs_bind_to_video_id
+        ):
             self.report_warning(
                 f'Unable to fetch GVS PO Token for {client} client: Missing required Visitor Data. '
-                f'You may need to pass Visitor Data with --extractor-args "youtube:visitor_data=XXX"')
+                f'You may need to pass Visitor Data with --extractor-args "youtube:visitor_data=XXX"', only_once=True)
             return

         if context == _PoTokenContext.PLAYER and not video_id:
@@ -2931,7 +2982,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         config_po_token = self._get_config_po_token(client, context)
         if config_po_token:
             # GVS WebPO token is bound to data_sync_id / account Session ID when logged in.
-            if player_url and context == _PoTokenContext.GVS and not data_sync_id and self.is_authenticated:
+            if (
+                player_url and context == _PoTokenContext.GVS
+                and not data_sync_id and self.is_authenticated and not gvs_bind_to_video_id
+            ):
                 self.report_warning(
                     f'Got a GVS PO Token for {client} client, but missing Data Sync ID for account. Formats may not work.'
                     f'You may need to pass a Data Sync ID with --extractor-args "youtube:data_sync_id=XXX"')
@@ -2943,7 +2997,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if player_url and context == _PoTokenContext.GVS and not data_sync_id and self.is_authenticated:
             self.report_warning(
                 f'Unable to fetch GVS PO Token for {client} client: Missing required Data Sync ID for account. '
-                f'You may need to pass a Data Sync ID with --extractor-args "youtube:data_sync_id=XXX"')
+                f'You may need to pass a Data Sync ID with --extractor-args "youtube:data_sync_id=XXX"', only_once=True)
             return

         po_token = self._fetch_po_token(
@@ -2957,6 +3011,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             video_id=video_id,
             video_webpage=webpage,
             required=required,
+            _gvs_bind_to_video_id=gvs_bind_to_video_id,
             **kwargs,
         )
@@ -3000,6 +3055,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             data_sync_id=kwargs.get('data_sync_id'),
             video_id=kwargs.get('video_id'),
             request_cookiejar=self._downloader.cookiejar,
+            _gvs_bind_to_video_id=kwargs.get('_gvs_bind_to_video_id', False),

             # All requests that would need to be proxied should be in the
             # context of www.youtube.com or the innertube host
@@ -4054,7 +4110,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 else 'video'),
             'release_timestamp': live_start_time,
             '_format_sort_fields': (  # source_preference is lower for potentially damaged formats
-                'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec', 'channels', 'acodec', 'lang', 'proto'),
+                'quality', 'res', 'fps', 'hdr:12', 'source',
+                'vcodec:vp9.2' if 'prefer-vp9-sort' in self.get_param('compat_opts', []) else 'vcodec',
+                'channels', 'acodec', 'lang', 'proto'),
         }

         def get_lang_code(track):

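Note: the experiment detection added above works because ytcfg serializes all experiment flags into a single query-string blob; `urllib.parse.parse_qs` turns it into a `{flag: [values]}` mapping and the `-1` index takes the last value. Quick check with an invented blob:

import urllib.parse

flags = urllib.parse.parse_qs('html5_generate_content_po_token=true&some_other_flag=123')
print(flags['html5_generate_content_po_token'][-1] == 'true')  # True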
View File

@@ -58,6 +58,8 @@ class PoTokenRequest:
     visitor_data: str | None = None
     data_sync_id: str | None = None
     video_id: str | None = None
+    # Internal, YouTube experiment on whether to bind GVS PO Token to video_id.
+    _gvs_bind_to_video_id: bool = False

     # Networking parameters
     request_cookiejar: YoutubeDLCookieJar = dataclasses.field(default_factory=YoutubeDLCookieJar)

View File

@@ -42,6 +42,9 @@ def get_webpo_content_binding(
     if not client_name or client_name not in webpo_clients:
         return None, None

+    if request.context == PoTokenContext.GVS and request._gvs_bind_to_video_id:
+        return request.video_id, ContentBindingType.VIDEO_ID
+
     if request.context == PoTokenContext.GVS or client_name in ('WEB_REMIX', ):
         if request.is_authenticated:
             return request.data_sync_id, ContentBindingType.DATASYNC_ID

View File

@@ -186,7 +186,7 @@ _OPERATORS = {  # None => Defined in JSInterpreter._operator
 _COMP_OPERATORS = {'===', '!==', '==', '!=', '<=', '>=', '<', '>'}
 _NAME_RE = r'[a-zA-Z_$][\w$]*'
-_MATCHING_PARENS = dict(zip(*zip('()', '{}', '[]')))
+_MATCHING_PARENS = dict(zip(*zip('()', '{}', '[]', strict=True), strict=True))
 _QUOTES = '\'"/'
 _NESTED_BRACKETS = r'[^[\]]+(?:\[[^[\]]+(?:\[[^\]]+\])?\])?'

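Note: the nested-zip idiom above is a transpose trick: the inner zip turns the bracket strings into an (opens, closes) pair of rows, and the outer zip(*...) transposes them back into (open, close) pairs for dict(). With the minimum Python now 3.10, both calls can assert equal lengths:

print(dict(zip(*zip('()', '{}', '[]', strict=True), strict=True)))
# {'(': ')', '{': '}', '[': ']'}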
View File

@@ -4,7 +4,6 @@ import functools
 import http.client
 import logging
 import re
-import socket
 import warnings

 from ..dependencies import brotli, requests, urllib3
@@ -125,7 +124,7 @@ class RequestsResponseAdapter(Response):
         # Work around issue with `.read(amt)` then `.read()`
         # See: https://github.com/urllib3/urllib3/issues/3636
         if amt is None:
-            # Python 3.9 preallocates the whole read buffer, read in chunks
+            # compat: py3.9: Python 3.9 preallocates the whole read buffer, read in chunks
             read_chunk = functools.partial(self.fp.read, 1 << 20, decode_content=True)
             return b''.join(iter(read_chunk, b''))
         # Interact with urllib3 response directly.
@@ -378,7 +377,7 @@ class SocksHTTPConnection(urllib3.connection.HTTPConnection):
                 source_address=self.source_address,
                 _create_socket_func=functools.partial(
                     create_socks_proxy_socket, (self.host, self.port), self._proxy_args))
-        except (socket.timeout, TimeoutError) as e:
+        except TimeoutError as e:
             raise urllib3.exceptions.ConnectTimeoutError(
                 self, f'Connection to {self.host} timed out. (connect timeout={self.timeout})') from e
         except SocksProxyError as e:
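
Dropping `import socket` is safe because since Python 3.10 `socket.timeout` is an alias of the builtin `TimeoutError`, which made the old two-element except tuple redundant:

    import socket

    # Python 3.10 made socket.timeout an alias of the builtin TimeoutError
    assert socket.timeout is TimeoutError

    try:
        raise socket.timeout('simulated connect timeout')
    except TimeoutError as e:  # catches both spellings
        print(f'caught: {e}')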

View File

@@ -12,6 +12,7 @@ import urllib.response
 from collections.abc import Iterable, Mapping
 from email.message import Message
 from http import HTTPStatus
+from types import NoneType

 from ._helper import make_ssl_context, wrap_request_errors
 from .exceptions import (
@@ -20,7 +21,6 @@ from .exceptions import (
     TransportError,
     UnsupportedRequest,
 )
-from ..compat.types import NoneType
 from ..cookies import YoutubeDLCookieJar
 from ..utils import (
     bug_reports_message,
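
Similarly, the vendored `..compat.types` shim is no longer needed: `types.NoneType` is back in the standard library as of Python 3.10:

    from types import NoneType  # stdlib since Python 3.10; no compat shim needed

    assert NoneType is type(None)
    print(isinstance(None, (str, NoneType)))  # True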

View File

@@ -3,11 +3,11 @@ from __future__ import annotations
 import re
 from abc import ABC
 from dataclasses import dataclass
+from types import NoneType
 from typing import Any

 from .common import RequestHandler, register_preference, Request
 from .exceptions import UnsupportedRequest
-from ..compat.types import NoneType
 from ..utils import classproperty, join_nonempty
 from ..utils.networking import std_headers, HTTPHeaderDict

View File

@@ -11,7 +11,6 @@ import os
 import pkgutil
 import sys
 import traceback
-import zipimport
 from pathlib import Path
 from zipfile import ZipFile
@@ -202,16 +201,10 @@ def load_plugins(plugin_spec: PluginSpec):
         if any(x.startswith('_') for x in module_name.split('.')):
             continue
         try:
-            if sys.version_info < (3, 10) and isinstance(finder, zipimport.zipimporter):
-                # zipimporter.load_module() is deprecated in 3.10 and removed in 3.12
-                # The exec_module branch below is the replacement for >= 3.10
-                # See: https://docs.python.org/3/library/zipimport.html#zipimport.zipimporter.exec_module
-                module = finder.load_module(module_name)
-            else:
-                spec = finder.find_spec(module_name)
-                module = importlib.util.module_from_spec(spec)
-                sys.modules[module_name] = module
-                spec.loader.exec_module(module)
+            spec = finder.find_spec(module_name)
+            module = importlib.util.module_from_spec(spec)
+            sys.modules[module_name] = module
+            spec.loader.exec_module(module)
         except Exception:
             write_string(
                 f'Error while importing module {module_name!r}\n{traceback.format_exc(limit=-1)}',
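
The surviving branch is the standard importlib loading sequence: find a spec, create the module from it, register it in `sys.modules` before executing (so imports during execution can resolve it), then run the module body. A self-contained sketch of the same pattern, using `json` as an arbitrary stand-in target:

    import importlib.util
    import sys

    def load_by_spec(module_name):
        spec = importlib.util.find_spec(module_name)
        module = importlib.util.module_from_spec(spec)
        # Register before exec so circular imports see the in-progress module
        sys.modules[module_name] = module
        spec.loader.exec_module(module)
        return module

    mod = load_by_spec('json')
    print(mod.dumps({'ok': True}))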

View File

@@ -418,7 +418,7 @@ class FFmpegPostProcessor(PostProcessor):
         if concat_opts is None:
             concat_opts = [{}] * len(in_files)
         yield 'ffconcat version 1.0\n'
-        for file, opts in zip(in_files, concat_opts):
+        for file, opts in zip(in_files, concat_opts, strict=True):
             yield f'file {cls._quote_for_ffmpeg(cls._ffmpeg_filename_argument(file))}\n'
             # Iterate explicitly to yield the following directives in order, ignoring the rest.
             for directive in 'inpoint', 'outpoint', 'duration':
@@ -639,7 +639,7 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
             # postprocessor a second time
             '-map', '-0:s',
         ]
-        for i, (lang, name) in enumerate(zip(sub_langs, sub_names)):
+        for i, (lang, name) in enumerate(zip(sub_langs, sub_names, strict=True)):
            opts.extend(['-map', f'{i + 1}:0'])
            lang_code = ISO639Utils.short2long(lang) or lang
            opts.extend([f'-metadata:s:s:{i}', f'language={lang_code}'])
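
Here `strict=True` turns a silent truncation into an immediate error if the language and name lists ever drift out of sync:

    langs, names = ['en', 'de'], ['English']  # drifted apart

    print(list(zip(langs, names)))  # [('en', 'English')] -- silently truncated

    try:
        list(zip(langs, names, strict=True))
    except ValueError as e:
        print(f'caught early: {e}')  # lengths differ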

View File

@@ -154,7 +154,7 @@ def _get_binary_name():


 def _get_system_deprecation():
-    MIN_SUPPORTED, MIN_RECOMMENDED = (3, 9), (3, 10)
+    MIN_SUPPORTED, MIN_RECOMMENDED = (3, 10), (3, 10)

     if sys.version_info > MIN_RECOMMENDED:
         return None
@@ -559,11 +559,9 @@ class Updater:
     @functools.cached_property
     def cmd(self):
         """The command-line to run the executable, if known"""
-        argv = None
-        # There is no sys.orig_argv in py < 3.10. Also, it can be [] when frozen
-        if getattr(sys, 'orig_argv', None):
-            argv = sys.orig_argv
-        elif getattr(sys, 'frozen', False):
+        argv = sys.orig_argv
+        # sys.orig_argv can be [] when frozen
+        if not argv and getattr(sys, 'frozen', False):
             argv = sys.argv
         # linux_static exe's argv[0] will be /tmp/staticx-NNNN/yt-dlp_linux if we don't fixup here
         if argv and os.getenv('STATICX_PROG_PATH'):
@@ -572,7 +570,7 @@ class Updater:

     def restart(self):
         """Restart the executable"""
-        assert self.cmd, 'Must be frozen or Py >= 3.10'
+        assert self.cmd, 'Unable to determine argv'
         self.ydl.write_debug(f'Restarting: {shell_quote(self.cmd)}')
         _, _, returncode = Popen.run(self.cmd)
         return returncode
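
`sys.orig_argv`, available since Python 3.10, records the interpreter's full command line (interpreter path and options included), while `sys.argv` starts at the script; that is what lets `cmd` reconstruct an exact restart command. For example:

    import sys

    # e.g. ['/usr/bin/python3', '-m', 'yt_dlp', '--version'] for orig_argv,
    # but ['/path/to/yt_dlp/__main__.py', '--version'] for argv
    print(sys.orig_argv)  # full interpreter command line (3.10+)
    print(sys.argv)       # script-relative argv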

View File

@@ -1,6 +1,4 @@
 """No longer used and new code should not use. Exists only for API compat."""
-import asyncio
-import atexit
 import platform
 import struct
 import sys
@@ -34,77 +32,6 @@ has_certifi = bool(certifi)
 has_websockets = bool(websockets)


-class WebSocketsWrapper:
-    """Wraps websockets module to use in non-async scopes"""
-    pool = None
-
-    def __init__(self, url, headers=None, connect=True, **ws_kwargs):
-        self.loop = asyncio.new_event_loop()
-        # XXX: "loop" is deprecated
-        self.conn = websockets.connect(
-            url, extra_headers=headers, ping_interval=None,
-            close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'), **ws_kwargs)
-        if connect:
-            self.__enter__()
-        atexit.register(self.__exit__, None, None, None)
-
-    def __enter__(self):
-        if not self.pool:
-            self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
-        return self
-
-    def send(self, *args):
-        self.run_with_loop(self.pool.send(*args), self.loop)
-
-    def recv(self, *args):
-        return self.run_with_loop(self.pool.recv(*args), self.loop)
-
-    def __exit__(self, type, value, traceback):
-        try:
-            return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
-        finally:
-            self.loop.close()
-            self._cancel_all_tasks(self.loop)
-
-    # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
-    # for contributors: If there's any new library using asyncio needs to be run in non-async, move these function out of this class
-    @staticmethod
-    def run_with_loop(main, loop):
-        if not asyncio.iscoroutine(main):
-            raise ValueError(f'a coroutine was expected, got {main!r}')
-
-        try:
-            return loop.run_until_complete(main)
-        finally:
-            loop.run_until_complete(loop.shutdown_asyncgens())
-            if hasattr(loop, 'shutdown_default_executor'):
-                loop.run_until_complete(loop.shutdown_default_executor())
-
-    @staticmethod
-    def _cancel_all_tasks(loop):
-        to_cancel = asyncio.all_tasks(loop)
-
-        if not to_cancel:
-            return
-
-        for task in to_cancel:
-            task.cancel()
-
-        # XXX: "loop" is removed in Python 3.10+
-        loop.run_until_complete(
-            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
-
-        for task in to_cancel:
-            if task.cancelled():
-                continue
-            if task.exception() is not None:
-                loop.call_exception_handler({
-                    'message': 'unhandled exception during asyncio.run() shutdown',
-                    'exception': task.exception(),
-                    'task': task,
-                })
-
-
 def load_plugins(name, suffix, namespace):
     from ..plugins import load_plugins
     ret = load_plugins(name, suffix)
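
The removed wrapper hand-managed an event loop so the websockets package could be driven from synchronous code; the loop setup and teardown it carried around is roughly what `asyncio.run()` provides out of the box, e.g.:

    import asyncio

    async def fetch():
        await asyncio.sleep(0)  # placeholder for real async I/O
        return 'done'

    # One call handles loop creation, task cleanup, and shutdown
    print(asyncio.run(fetch()))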

View File

@@ -95,7 +95,7 @@ TIMEZONE_NAMES = {
 # needed for sanitizing filenames in restricted mode
 ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                         itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
-                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
+                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y'), strict=True))

 DATE_FORMATS = (
     '%d %B %Y',
@@ -2415,7 +2415,7 @@ class PlaylistEntries:
         if self.is_incomplete:
             assert self.is_exhausted
             self._entries = [self.MissingEntry] * max(requested_entries or [0])
-            for i, entry in zip(requested_entries, entries):
+            for i, entry in zip(requested_entries, entries):  # noqa: B905
                 self._entries[i - 1] = entry
         elif isinstance(entries, (list, PagedList, LazyList)):
             self._entries = entries
@@ -3184,7 +3184,7 @@ def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
         return len(remove_terminal_sequences(string).replace('\t', ''))

     def get_max_lens(table):
-        return [max(width(str(v)) for v in col) for col in zip(*table)]
+        return [max(width(str(v)) for v in col) for col in zip(*table, strict=True)]

     def filter_using_list(row, filter_array):
         return [col for take, col in itertools.zip_longest(filter_array, row, fillvalue=True) if take]
@@ -3540,7 +3540,7 @@ def dfxp2srt(dfxp_data):
             continue
         default_style.update(style)

-    for para, index in zip(paras, itertools.count(1)):
+    for para, index in zip(paras, itertools.count(1), strict=False):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
@@ -4854,7 +4854,7 @@ def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
     return [
         merge_dicts(
             {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
-            dict(zip(_keys, max_dimensions)), thumbnail)
+            dict(zip(_keys, max_dimensions, strict=True)), thumbnail)
         for thumbnail in thumbnails
     ]
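
Note the two different decisions above: the table and thumbnail zips become `strict=True`, while the `itertools.count(1)` pairing is explicitly `strict=False` (and the `PlaylistEntries` zip gets `# noqa: B905` to mark it as intentionally non-strict for the linter), because `count()` never ends and a strict zip would raise as soon as the finite side runs out:

    import itertools

    paras = ['first', 'second']
    # A 1-based enumerate: count(1) is infinite, so this zip must NOT be
    # strict -- strict=True would raise ValueError once paras is exhausted
    for para, index in zip(paras, itertools.count(1), strict=False):
        print(index, para)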

View File

@@ -110,7 +110,7 @@ def parse_iter(parsed: typing.Any, /, *, revivers: dict[str, collections.abc.Cal
         elif value[0] == 'Map':
             result = []
-            for key, new_source in zip(*(iter(value[1:]),) * 2):
+            for key, new_source in zip(*(iter(value[1:]),) * 2, strict=True):
                 pair = [None, None]
                 stack.append((pair, 0, key))
                 stack.append((pair, 1, new_source))
@@ -129,7 +129,7 @@ def parse_iter(parsed: typing.Any, /, *, revivers: dict[str, collections.abc.Cal
         elif value[0] == 'null':
             result = {}
-            for key, new_source in zip(*(iter(value[1:]),) * 2):
+            for key, new_source in zip(*(iter(value[1:]),) * 2, strict=True):
                 stack.append((result, key, new_source))
         elif value[0] in _ARRAY_TYPE_LOOKUP:
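
`zip(*(iter(x),) * 2)` is the usual flat-list-to-pairs idiom: both arguments are the same iterator, so each output tuple consumes two consecutive items. With `strict=True`, an odd-length input (a dangling key) now raises instead of being dropped:

    flat = ['a', 1, 'b', 2]
    it = iter(flat)
    print(list(zip(it, it, strict=True)))  # [('a', 1), ('b', 2)]

    it = iter(['a', 1, 'b'])  # dangling key
    try:
        list(zip(it, it, strict=True))
    except ValueError as e:
        print(f'caught: {e}')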

View File

@@ -1,8 +1,8 @@
 # Autogenerated by devscripts/update-version.py

-__version__ = '2025.09.23'
+__version__ = '2025.10.22'

-RELEASE_GIT_HEAD = '2e81e298cdce23afadb06a95836284acb38f7018'
+RELEASE_GIT_HEAD = 'c9356f308dd3c5f9f494cb40ed14c5df017b4fe0'

 VARIANT = None
@@ -12,4 +12,4 @@ CHANNEL = 'stable'

 ORIGIN = 'yt-dlp/yt-dlp'

-_pkg_version = '2025.09.23'
+_pkg_version = '2025.10.22'

View File

@@ -103,7 +103,7 @@ def _parse_ts(ts):
     into an MPEG PES timestamp: a tick counter at 90 kHz resolution.
     """
     return 90 * sum(
-        int(part or 0) * mult for part, mult in zip(ts.groups(), (3600_000, 60_000, 1000, 1)))
+        int(part or 0) * mult for part, mult in zip(ts.groups(), (3600_000, 60_000, 1000, 1), strict=True))


 def _format_ts(ts):
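
As a worked example of the conversion (the regex below is only a stand-in for the original timestamp pattern, which is outside this hunk): the multipliers turn the (hours, minutes, seconds, milliseconds) groups into milliseconds, and the factor of 90 then yields 90 kHz ticks, so 01:02:03.500 is 3,723,500 ms, i.e. 335,115,000 ticks.

    import re

    # Hypothetical stand-in for the module's timestamp regex
    m = re.match(r'(?:(\d+):)?(\d+):(\d+)\.(\d+)', '01:02:03.500')
    ticks = 90 * sum(
        int(part or 0) * mult
        for part, mult in zip(m.groups(), (3600_000, 60_000, 1000, 1), strict=True))
    print(ticks)  # 335115000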