Mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2026-01-19 05:11:29 +00:00

Compare commits: 2023.10.07...2023.11.14 (60 commits)
| SHA1 |
|---|
| 5d3a3cd493 |
| a9d3f4b20a |
| b012271d01 |
| f04b5bedad |
| d4f14a72dc |
| 87264d4fda |
| a00af29853 |
| 0b6ad22e6a |
| 5438593a35 |
| 9970d74c83 |
| 20314dd46f |
| 1d03633c5a |
| 8afd9468b0 |
| ef12dbdcd3 |
| 46acc418a5 |
| 6ba3085616 |
| f6e97090d2 |
| 2863fcf2b6 |
| c76c96677f |
| 15b252dfd2 |
| 312a2d1e8b |
| 54579be436 |
| 05adfd883a |
| 3ff494f6f4 |
| 9b5bedf13a |
| cb480e390d |
| 25a4bd345a |
| 3906de0755 |
| 7d337ca977 |
| 10025b715e |
| 595ea4a99b |
| 2622c804d1 |
| fd8fcf8f4f |
| 21b25281c5 |
| 4a601c9eff |
| 464327acdb |
| ef79d20dc9 |
| 39abae2354 |
| 4ce2f29a50 |
| 177f0d963e |
| 8e02a4dcc8 |
| 7b8b1cf5eb |
| a40e0b37df |
| 4e38e2ae9d |
| 8a8b54523a |
| 700444c23d |
| b73c409318 |
| b634ba742d |
| 2acd1d555e |
| b286ec68f1 |
| e030b6b6fb |
| b931664231 |
| feebf6d02f |
| 84e26038d4 |
| 4de94b9e16 |
| 88a99c87b6 |
| 09f815ad52 |
| b7098d46b5 |
| 1c51c520f7 |
| 9d7ded6419 |
.github/ISSUE_TEMPLATE/1_broken_site.yml (vendored, 17 changed lines)

```diff
@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm reporting that yt-dlp is broken on a **supported** site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.07** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -61,19 +61,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.07 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.07, Current version: 2023.10.07
-        yt-dlp is up to date (2023.10.07)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
```
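For reference, the updated placeholder is the opening of what a current build prints when run with the flags these templates request; reporters paste their own output from an invocation like the one below (the URL is simply the example used in the template):

```bash
# -v enables verbose debug output; -U checks for updates before downloading.
yt-dlp -vU 'https://www.youtube.com/watch?v=BaW_jenozKc'
```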
.github/ISSUE_TEMPLATE/2_site_support_request.yml (vendored)

```diff
@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.07** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -73,19 +73,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.07 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.07, Current version: 2023.10.07
-        yt-dlp is up to date (2023.10.07)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
```
.github/ISSUE_TEMPLATE/3_site_feature_request.yml (vendored)

```diff
@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm requesting a site-specific feature
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.07** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -69,19 +69,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.07 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.07, Current version: 2023.10.07
-        yt-dlp is up to date (2023.10.07)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
```
.github/ISSUE_TEMPLATE/4_bug_report.yml (vendored, 17 changed lines)

```diff
@@ -18,7 +18,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.07** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -54,19 +54,18 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.07 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.07, Current version: 2023.10.07
-        yt-dlp is up to date (2023.10.07)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
     validations:
```
.github/ISSUE_TEMPLATE/5_feature_request.yml (vendored, 17 changed lines)

```diff
@@ -20,7 +20,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.07** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -50,18 +50,17 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.07 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.07, Current version: 2023.10.07
-        yt-dlp is up to date (2023.10.07)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
```
.github/ISSUE_TEMPLATE/6_question.yml (vendored, 17 changed lines)

```diff
@@ -26,7 +26,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2023.10.07** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
           required: true
@@ -56,18 +56,17 @@ body:
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2023.10.07 [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2023.10.07, Current version: 2023.10.07
-        yt-dlp is up to date (2023.10.07)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
       render: shell
```
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml (vendored)

```diff
@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm reporting that yt-dlp is broken on a **supported** site
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
```
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml (vendored)

```diff
@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
```
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml (vendored)

```diff
@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm requesting a site-specific feature
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
```
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml (vendored, 2 changed lines)

```diff
@@ -12,7 +12,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
```
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml (vendored)

```diff
@@ -14,7 +14,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
```
.github/ISSUE_TEMPLATE_tmpl/6_question.yml (vendored, 2 changed lines)

```diff
@@ -20,7 +20,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
           required: true
         - label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
           required: true
```
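The files under `ISSUE_TEMPLATE_tmpl/` are templates for the templates: a devscript substitutes Python `%`-style fields such as `%(version)s` when the published issue forms are generated. That script is not part of this diff; the following is only a rough, hypothetical stand-in for the substitution step, not the real implementation:

```bash
# Hypothetical stand-in: render one _tmpl file with a concrete version string.
version='2023.11.14'
sed "s/%(version)s/${version}/g" \
    .github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml \
    > .github/ISSUE_TEMPLATE/1_broken_site.yml
```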
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 changed lines)

```diff
@@ -40,10 +40,4 @@ Fixes #
 - [ ] Core bug fix/improvement
 - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
 
-
-<!-- Do NOT edit/remove anything below this! -->
-</details><details><summary>Copilot Summary</summary>
-
-copilot:all
-
 </details>
```
.github/workflows/build.yml (vendored, 69 changed lines)

```diff
@@ -30,6 +30,10 @@ on:
       meta_files:
         default: true
         type: boolean
+      origin:
+        required: false
+        default: ''
+        type: string
     secrets:
       GPG_SIGNING_KEY:
         required: false
@@ -37,11 +41,13 @@ on:
   workflow_dispatch:
     inputs:
       version:
-        description: Version tag (YYYY.MM.DD[.REV])
+        description: |
+          VERSION: yyyy.mm.dd[.rev] or rev
         required: true
         type: string
       channel:
-        description: Update channel (stable/nightly/...)
+        description: |
+          SOURCE of this build's updates: stable/nightly/master/<repo>
         required: true
         default: stable
         type: string
@@ -73,16 +79,34 @@ on:
         description: SHA2-256SUMS, SHA2-512SUMS, _update_spec
         default: true
         type: boolean
+      origin:
+        description: .
+        required: false
+        default: ''
+        type: choice
+        options:
+        - ''
 
 permissions:
   contents: read
 
 jobs:
+  process:
+    runs-on: ubuntu-latest
+    outputs:
+      origin: ${{ steps.process_origin.outputs.origin }}
+    steps:
+      - name: Process origin
+        id: process_origin
+        run: |
+          echo "origin=${{ inputs.origin || github.repository }}" >> "$GITHUB_OUTPUT"
+
   unix:
+    needs: process
     if: inputs.unix
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: "3.10"
@@ -96,22 +120,21 @@ jobs:
           auto-activate-base: false
       - name: Install Requirements
         run: |
-          sudo apt-get -y install zip pandoc man sed
-          python -m pip install -U pip setuptools wheel
-          python -m pip install -U Pyinstaller -r requirements.txt
+          sudo apt -y install zip pandoc man sed
           reqs=$(mktemp)
-          cat > $reqs << EOF
+          cat > "$reqs" << EOF
           python=3.10.*
           pyinstaller
           cffi
           brotli-python
+          secretstorage
           EOF
-          sed '/^brotli.*/d' requirements.txt >> $reqs
-          mamba create -n build --file $reqs
+          sed -E '/^(brotli|secretstorage).*/d' requirements.txt >> "$reqs"
+          mamba create -n build --file "$reqs"
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python devscripts/make_lazy_extractors.py
       - name: Build Unix platform-independent binary
         run: |
@@ -150,6 +173,7 @@ jobs:
             yt-dlp_linux.zip
 
   linux_arm:
+    needs: process
     if: inputs.linux_arm
     permissions:
       contents: read
@@ -162,7 +186,7 @@ jobs:
           - aarch64
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          path: ./repo
       - name: Virtualized Install, Prepare & Build
@@ -185,7 +209,7 @@ jobs:
         run: |
           cd repo
           python3.8 -m pip install -U Pyinstaller -r requirements.txt  # Cached version may be out of date
-          python3.8 devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python3.8 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python3.8 devscripts/make_lazy_extractors.py
           python3.8 pyinst.py
 
@@ -206,11 +230,12 @@ jobs:
           repo/dist/yt-dlp_linux_${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}
 
   macos:
+    needs: process
     if: inputs.macos
     runs-on: macos-11
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       # NB: Building universal2 does not work with python from actions/setup-python
       - name: Install Requirements
         run: |
@@ -221,7 +246,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python3 devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python3 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python3 devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -247,11 +272,12 @@ jobs:
           dist/yt-dlp_macos.zip
 
   macos_legacy:
+    needs: process
     if: inputs.macos_legacy
     runs-on: macos-latest
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Python
         # We need the official Python, because the GA ones only support newer macOS versions
         env:
@@ -272,7 +298,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python3 devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python3 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python3 devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -296,11 +322,12 @@ jobs:
           dist/yt-dlp_macos_legacy
 
   windows:
+    needs: process
     if: inputs.windows
     runs-on: windows-latest
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with: # 3.8 is used for Win7 support
           python-version: "3.8"
@@ -311,7 +338,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -343,11 +370,12 @@ jobs:
           dist/yt-dlp_win.zip
 
   windows32:
+    needs: process
     if: inputs.windows32
     runs-on: windows-latest
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
         with: # 3.7 is used for Vista support. See https://github.com/yt-dlp/yt-dlp/issues/390
           python-version: "3.7"
@@ -359,7 +387,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py -c ${{ inputs.channel }} ${{ inputs.version }}
+          python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
           python devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -387,6 +415,7 @@ jobs:
   meta_files:
     if: inputs.meta_files && always() && !cancelled()
     needs:
+      - process
       - unix
       - linux_arm
       - macos
```
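The new `process` job threads an `origin` value to every build job through GitHub Actions step outputs: a step appends `key=value` lines to the file named by `$GITHUB_OUTPUT`, the job republishes that value as `outputs.origin`, and downstream jobs read it as `${{ needs.process.outputs.origin }}`. A local sketch of the write side (on a real runner, `GITHUB_OUTPUT` is provided by Actions rather than created with mktemp):

```bash
# Simulate the "Process origin" step outside of a runner.
GITHUB_OUTPUT="$(mktemp)"         # the runner normally sets this path
inputs_origin=''                  # assume the workflow input was left empty
github_repository='yt-dlp/yt-dlp'

# Mirrors the expression ${{ inputs.origin || github.repository }}:
# fall back to the current repository when no origin was supplied.
echo "origin=${inputs_origin:-${github_repository}}" >> "$GITHUB_OUTPUT"

cat "$GITHUB_OUTPUT"              # prints: origin=yt-dlp/yt-dlp
```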
.github/workflows/codeql.yml (vendored, 2 changed lines)

```diff
@@ -29,7 +29,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
```
.github/workflows/core.yml (vendored, 6 changed lines)

```diff
@@ -27,13 +27,13 @@ jobs:
             python-version: pypy-3.9
             run-tests-ext: bat
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Install pytest
-        run: pip install pytest
+      - name: Install dependencies
+        run: pip install pytest -r requirements.txt
       - name: Run tests
         continue-on-error: False
         run: |
```
.github/workflows/download.yml (vendored, 4 changed lines)

```diff
@@ -9,7 +9,7 @@ jobs:
     if: "contains(github.event.head_commit.message, 'ci run dl')"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
@@ -39,7 +39,7 @@ jobs:
             python-version: pypy-3.9
             run-tests-ext: bat
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
         with:
```
.github/workflows/publish.yml (vendored, file removed, 97 lines)

```diff
@@ -1,97 +0,0 @@
-name: Publish
-on:
-  workflow_call:
-    inputs:
-      channel:
-        default: stable
-        required: true
-        type: string
-      version:
-        required: true
-        type: string
-      target_commitish:
-        required: true
-        type: string
-      prerelease:
-        default: false
-        required: true
-        type: boolean
-    secrets:
-      ARCHIVE_REPO_TOKEN:
-        required: false
-
-permissions:
-  contents: write
-
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - uses: actions/download-artifact@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: "3.10"
-
-      - name: Generate release notes
-        run: |
-          printf '%s' \
-            '[]' \
-            '(https://github.com/yt-dlp/yt-dlp#installation "Installation instructions") ' \
-            '[]' \
-            '(https://github.com/yt-dlp/yt-dlp/tree/2023.03.04#readme "Documentation") ' \
-            '[]' \
-            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
-            '[]' \
-            '(https://discord.gg/H5MNcFW63r "Discord") ' \
-            ${{ inputs.channel != 'nightly' && '"[]" \
-            "(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\")"' || '' }} \
-            > ./RELEASE_NOTES
-          printf '\n\n' >> ./RELEASE_NOTES
-          cat >> ./RELEASE_NOTES << EOF
-          #### A description of the various files are in the [README](https://github.com/yt-dlp/yt-dlp#release-files)
-          ---
-          $(python ./devscripts/make_changelog.py -vv --collapsible)
-          EOF
-          printf '%s\n\n' '**This is an automated nightly pre-release build**' >> ./NIGHTLY_NOTES
-          cat ./RELEASE_NOTES >> ./NIGHTLY_NOTES
-          printf '%s\n\n' 'Generated from: https://github.com/${{ github.repository }}/commit/${{ inputs.target_commitish }}' >> ./ARCHIVE_NOTES
-          cat ./RELEASE_NOTES >> ./ARCHIVE_NOTES
-
-      - name: Archive nightly release
-        env:
-          GH_TOKEN: ${{ secrets.ARCHIVE_REPO_TOKEN }}
-          GH_REPO: ${{ vars.ARCHIVE_REPO }}
-        if: |
-          inputs.channel == 'nightly' && env.GH_TOKEN != '' && env.GH_REPO != ''
-        run: |
-          gh release create \
-            --notes-file ARCHIVE_NOTES \
-            --title "yt-dlp nightly ${{ inputs.version }}" \
-            ${{ inputs.version }} \
-            artifact/*
-
-      - name: Prune old nightly release
-        if: inputs.channel == 'nightly' && !vars.ARCHIVE_REPO
-        env:
-          GH_TOKEN: ${{ github.token }}
-        run: |
-          gh release delete --yes --cleanup-tag "nightly" || true
-          git tag --delete "nightly" || true
-          sleep 5  # Enough time to cover deletion race condition
-
-      - name: Publish release${{ inputs.channel == 'nightly' && ' (nightly)' || '' }}
-        env:
-          GH_TOKEN: ${{ github.token }}
-        if: (inputs.channel == 'nightly' && !vars.ARCHIVE_REPO) || inputs.channel != 'nightly'
-        run: |
-          gh release create \
-            --notes-file ${{ inputs.channel == 'nightly' && 'NIGHTLY_NOTES' || 'RELEASE_NOTES' }} \
-            --target ${{ inputs.target_commitish }} \
-            --title "yt-dlp ${{ inputs.channel == 'nightly' && 'nightly ' || '' }}${{ inputs.version }}" \
-            ${{ inputs.prerelease && '--prerelease' || '' }} \
-            ${{ inputs.channel == 'nightly' && '"nightly"' || inputs.version }} \
-            artifact/*
```
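Until its removal, this workflow drove the GitHub CLI directly. The sketch below condenses the prune-then-publish sequence into one script, using only `gh` flags that appear in the deleted file above; the token, repository, version string, and artifact paths are placeholders, and this is not a drop-in replacement for the workflow:

```bash
# Condensed sketch of the deleted nightly publish flow.
export GH_TOKEN='<token>' GH_REPO='yt-dlp/yt-dlp-nightly-builds'
version='2023.11.05.123456'   # placeholder version string

# Prune the previous rolling "nightly" release and tag, tolerating absence.
gh release delete --yes --cleanup-tag 'nightly' || true
git tag --delete 'nightly' || true

# Publish the new build with pre-generated notes and the built artifacts.
gh release create 'nightly' artifact/* \
    --notes-file NIGHTLY_NOTES \
    --title "yt-dlp nightly ${version}" \
    --prerelease
```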
.github/workflows/quick-test.yml (vendored, 4 changed lines)

```diff
@@ -9,7 +9,7 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python 3.11
         uses: actions/setup-python@v4
         with:
@@ -25,7 +25,7 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
       - name: Install flake8
         run: pip install flake8
```
.github/workflows/release-master.yml (vendored, new file, 28 lines)

```diff
@@ -0,0 +1,28 @@
+name: Release (master)
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "yt_dlp/**.py"
+      - "!yt_dlp/version.py"
+      - "setup.py"
+      - "pyinst.py"
+concurrency:
+  group: release-master
+  cancel-in-progress: true
+permissions:
+  contents: read
+
+jobs:
+  release:
+    if: vars.BUILD_MASTER != ''
+    uses: ./.github/workflows/release.yml
+    with:
+      prerelease: true
+      source: master
+    permissions:
+      contents: write
+      packages: write
+      id-token: write # mandatory for trusted publishing
+    secrets: inherit
```
.github/workflows/release-nightly.yml (vendored, 57 changed lines)

```diff
@@ -1,52 +1,35 @@
 name: Release (nightly)
 on:
-  push:
-    branches:
-      - master
-    paths:
-      - "yt_dlp/**.py"
-      - "!yt_dlp/version.py"
-concurrency:
-  group: release-nightly
-  cancel-in-progress: true
+  schedule:
+    - cron: '23 23 * * *'
 permissions:
   contents: read
 
 jobs:
-  prepare:
+  check_nightly:
     if: vars.BUILD_NIGHTLY != ''
     runs-on: ubuntu-latest
     outputs:
-      version: ${{ steps.get_version.outputs.version }}
+      commit: ${{ steps.check_for_new_commits.outputs.commit }}
 
     steps:
-      - uses: actions/checkout@v3
-      - name: Get version
-        id: get_version
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Check for new commits
+        id: check_for_new_commits
         run: |
-          python devscripts/update-version.py "$(date -u +"%H%M%S")" | grep -Po "version=\d+(\.\d+){3}" >> "$GITHUB_OUTPUT"
+          relevant_files=("yt_dlp/*.py" ':!yt_dlp/version.py' "setup.py" "pyinst.py")
+          echo "commit=$(git log --format=%H -1 --since="24 hours ago" -- "${relevant_files[@]}")" | tee "$GITHUB_OUTPUT"
 
-  build:
-    needs: prepare
-    uses: ./.github/workflows/build.yml
+  release:
+    needs: [check_nightly]
+    if: ${{ needs.check_nightly.outputs.commit }}
+    uses: ./.github/workflows/release.yml
     with:
-      version: ${{ needs.prepare.outputs.version }}
-      channel: nightly
-    permissions:
-      contents: read
-      packages: write # For package cache
-    secrets:
-      GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
-
-  publish:
-    needs: [prepare, build]
-    uses: ./.github/workflows/publish.yml
-    secrets:
-      ARCHIVE_REPO_TOKEN: ${{ secrets.ARCHIVE_REPO_TOKEN }}
+      prerelease: true
+      source: nightly
     permissions:
       contents: write
-    with:
-      channel: nightly
-      prerelease: true
-      version: ${{ needs.prepare.outputs.version }}
-      target_commitish: ${{ github.sha }}
+      packages: write
+      id-token: write # mandatory for trusted publishing
+    secrets: inherit
```
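The replacement trigger is a daily cron plus a guard: release only if something relevant changed in the last 24 hours. The guard is plain `git log` with pathspecs, where `':!yt_dlp/version.py'` excludes the auto-bumped version file; empty output means the `release` job is skipped. The check in isolation:

```bash
# Requires a full clone (the workflow checks out with fetch-depth: 0).
relevant_files=("yt_dlp/*.py" ':!yt_dlp/version.py' "setup.py" "pyinst.py")
commit="$(git log --format=%H -1 --since='24 hours ago' -- "${relevant_files[@]}")"
echo "commit=${commit}"   # empty when nothing relevant changed
```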
358
.github/workflows/release.yml
vendored
358
.github/workflows/release.yml
vendored
@@ -1,14 +1,45 @@
|
|||||||
name: Release
|
name: Release
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_call:
|
||||||
inputs:
|
inputs:
|
||||||
version:
|
prerelease:
|
||||||
description: Version tag (YYYY.MM.DD[.REV])
|
required: false
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
source:
|
||||||
required: false
|
required: false
|
||||||
default: ''
|
default: ''
|
||||||
type: string
|
type: string
|
||||||
channel:
|
target:
|
||||||
description: Update channel (stable/nightly/...)
|
required: false
|
||||||
|
default: ''
|
||||||
|
type: string
|
||||||
|
version:
|
||||||
|
required: false
|
||||||
|
default: ''
|
||||||
|
type: string
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
source:
|
||||||
|
description: |
|
||||||
|
SOURCE of this release's updates:
|
||||||
|
channel, repo, tag, or channel/repo@tag
|
||||||
|
(default: <current_repo>)
|
||||||
|
required: false
|
||||||
|
default: ''
|
||||||
|
type: string
|
||||||
|
target:
|
||||||
|
description: |
|
||||||
|
TARGET to publish this release to:
|
||||||
|
channel, tag, or channel@tag
|
||||||
|
(default: <source> if writable else <current_repo>[@source_tag])
|
||||||
|
required: false
|
||||||
|
default: ''
|
||||||
|
type: string
|
||||||
|
version:
|
||||||
|
description: |
|
||||||
|
VERSION: yyyy.mm.dd[.rev] or rev
|
||||||
|
(default: auto-generated)
|
||||||
required: false
|
required: false
|
||||||
default: ''
|
default: ''
|
||||||
type: string
|
type: string
|
||||||
@@ -26,12 +57,18 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
outputs:
|
outputs:
|
||||||
channel: ${{ steps.set_channel.outputs.channel }}
|
channel: ${{ steps.setup_variables.outputs.channel }}
|
||||||
version: ${{ steps.update_version.outputs.version }}
|
version: ${{ steps.setup_variables.outputs.version }}
|
||||||
|
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
||||||
|
target_repo_token: ${{ steps.setup_variables.outputs.target_repo_token }}
|
||||||
|
target_tag: ${{ steps.setup_variables.outputs.target_tag }}
|
||||||
|
pypi_project: ${{ steps.setup_variables.outputs.pypi_project }}
|
||||||
|
pypi_suffix: ${{ steps.setup_variables.outputs.pypi_suffix }}
|
||||||
|
pypi_token: ${{ steps.setup_variables.outputs.pypi_token }}
|
||||||
head_sha: ${{ steps.get_target.outputs.head_sha }}
|
head_sha: ${{ steps.get_target.outputs.head_sha }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@@ -39,25 +76,133 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: "3.10"
|
||||||
|
|
||||||
- name: Set channel
|
- name: Process inputs
|
||||||
id: set_channel
|
id: process_inputs
|
||||||
run: |
|
run: |
|
||||||
CHANNEL="${{ github.repository == 'yt-dlp/yt-dlp' && 'stable' || github.repository }}"
|
cat << EOF
|
||||||
echo "channel=${{ inputs.channel || '$CHANNEL' }}" > "$GITHUB_OUTPUT"
|
::group::Inputs
|
||||||
|
prerelease=${{ inputs.prerelease }}
|
||||||
|
source=${{ inputs.source }}
|
||||||
|
target=${{ inputs.target }}
|
||||||
|
version=${{ inputs.version }}
|
||||||
|
::endgroup::
|
||||||
|
EOF
|
||||||
|
IFS='@' read -r source_repo source_tag <<<"${{ inputs.source }}"
|
||||||
|
IFS='@' read -r target_repo target_tag <<<"${{ inputs.target }}"
|
||||||
|
cat << EOF >> "$GITHUB_OUTPUT"
|
||||||
|
source_repo=${source_repo}
|
||||||
|
source_tag=${source_tag}
|
||||||
|
target_repo=${target_repo}
|
||||||
|
target_tag=${target_tag}
|
||||||
|
EOF
|
||||||
|
|
||||||
- name: Update version
|
- name: Setup variables
|
||||||
id: update_version
|
id: setup_variables
|
||||||
|
env:
|
||||||
|
source_repo: ${{ steps.process_inputs.outputs.source_repo }}
|
||||||
|
source_tag: ${{ steps.process_inputs.outputs.source_tag }}
|
||||||
|
target_repo: ${{ steps.process_inputs.outputs.target_repo }}
|
||||||
|
target_tag: ${{ steps.process_inputs.outputs.target_tag }}
|
||||||
run: |
|
run: |
|
||||||
REVISION="${{ vars.PUSH_VERSION_COMMIT == '' && '$(date -u +"%H%M%S")' || '' }}"
|
# unholy bash monstrosity (sincere apologies)
|
||||||
REVISION="${{ inputs.prerelease && '$(date -u +"%H%M%S")' || '$REVISION' }}"
|
fallback_token () {
|
||||||
python devscripts/update-version.py ${{ inputs.version || '$REVISION' }} | \
|
if ${{ !secrets.ARCHIVE_REPO_TOKEN }}; then
|
||||||
grep -Po "version=\d+\.\d+\.\d+(\.\d+)?" >> "$GITHUB_OUTPUT"
|
echo "::error::Repository access secret ${target_repo_token^^} not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
target_repo_token=ARCHIVE_REPO_TOKEN
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
source_is_channel=0
|
||||||
|
[[ "${source_repo}" == 'stable' ]] && source_repo='yt-dlp/yt-dlp'
|
||||||
|
if [[ -z "${source_repo}" ]]; then
|
||||||
|
source_repo='${{ github.repository }}'
|
||||||
|
elif [[ '${{ vars[format('{0}_archive_repo', env.source_repo)] }}' ]]; then
|
||||||
|
source_is_channel=1
|
||||||
|
source_channel='${{ vars[format('{0}_archive_repo', env.source_repo)] }}'
|
||||||
|
elif [[ -z "${source_tag}" && "${source_repo}" != */* ]]; then
|
||||||
|
source_tag="${source_repo}"
|
||||||
|
source_repo='${{ github.repository }}'
|
||||||
|
fi
|
||||||
|
resolved_source="${source_repo}"
|
||||||
|
if [[ "${source_tag}" ]]; then
|
||||||
|
resolved_source="${resolved_source}@${source_tag}"
|
||||||
|
elif [[ "${source_repo}" == 'yt-dlp/yt-dlp' ]]; then
|
||||||
|
resolved_source='stable'
|
||||||
|
fi
|
||||||
|
|
||||||
|
revision="${{ (inputs.prerelease || !vars.PUSH_VERSION_COMMIT) && '$(date -u +"%H%M%S")' || '' }}"
|
||||||
|
version="$(
|
||||||
|
python devscripts/update-version.py \
|
||||||
|
-c "${resolved_source}" -r "${{ github.repository }}" ${{ inputs.version || '$revision' }} | \
|
||||||
|
grep -Po "version=\K\d+\.\d+\.\d+(\.\d+)?")"
|
||||||
|
|
||||||
|
if [[ "${target_repo}" ]]; then
|
||||||
|
if [[ -z "${target_tag}" ]]; then
|
||||||
|
if [[ '${{ vars[format('{0}_archive_repo', env.target_repo)] }}' ]]; then
|
||||||
|
target_tag="${source_tag:-${version}}"
|
||||||
|
else
|
||||||
|
target_tag="${target_repo}"
|
||||||
|
target_repo='${{ github.repository }}'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
if [[ "${target_repo}" != '${{ github.repository}}' ]]; then
|
||||||
|
target_repo='${{ vars[format('{0}_archive_repo', env.target_repo)] }}'
|
||||||
|
target_repo_token='${{ env.target_repo }}_archive_repo_token'
|
||||||
|
${{ !!secrets[format('{0}_archive_repo_token', env.target_repo)] }} || fallback_token
|
||||||
|
pypi_project='${{ vars[format('{0}_pypi_project', env.target_repo)] }}'
|
||||||
|
pypi_suffix='${{ vars[format('{0}_pypi_suffix', env.target_repo)] }}'
|
||||||
|
${{ !secrets[format('{0}_pypi_token', env.target_repo)] }} || pypi_token='${{ env.target_repo }}_pypi_token'
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
target_tag="${source_tag:-${version}}"
|
||||||
|
if ((source_is_channel)); then
|
||||||
|
target_repo="${source_channel}"
|
||||||
|
target_repo_token='${{ env.source_repo }}_archive_repo_token'
|
||||||
|
${{ !!secrets[format('{0}_archive_repo_token', env.source_repo)] }} || fallback_token
|
||||||
|
pypi_project='${{ vars[format('{0}_pypi_project', env.source_repo)] }}'
|
||||||
|
pypi_suffix='${{ vars[format('{0}_pypi_suffix', env.source_repo)] }}'
|
||||||
|
${{ !secrets[format('{0}_pypi_token', env.source_repo)] }} || pypi_token='${{ env.source_repo }}_pypi_token'
|
||||||
|
else
|
||||||
|
target_repo='${{ github.repository }}'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${target_repo}" == '${{ github.repository }}' ]] && ${{ !inputs.prerelease }}; then
|
||||||
|
pypi_project='${{ vars.PYPI_PROJECT }}'
|
||||||
|
fi
|
||||||
|
if [[ -z "${pypi_token}" && "${pypi_project}" ]]; then
|
||||||
|
if ${{ !secrets.PYPI_TOKEN }}; then
|
||||||
|
pypi_token=OIDC
|
||||||
|
else
|
||||||
|
pypi_token=PYPI_TOKEN
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "::group::Output variables"
|
||||||
|
cat << EOF | tee -a "$GITHUB_OUTPUT"
|
||||||
|
channel=${resolved_source}
|
||||||
|
version=${version}
|
||||||
|
target_repo=${target_repo}
|
||||||
|
target_repo_token=${target_repo_token}
|
||||||
|
target_tag=${target_tag}
|
||||||
|
pypi_project=${pypi_project}
|
||||||
|
pypi_suffix=${pypi_suffix}
|
||||||
|
pypi_token=${pypi_token}
|
||||||
|
EOF
|
||||||
|
echo "::endgroup::"
|
||||||
|
|
||||||
- name: Update documentation
|
- name: Update documentation
|
||||||
|
env:
|
||||||
|
version: ${{ steps.setup_variables.outputs.version }}
|
||||||
|
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
||||||
|
if: |
|
||||||
|
!inputs.prerelease && env.target_repo == github.repository
|
||||||
run: |
|
run: |
|
||||||
make doc
|
make doc
|
||||||
sed '/### /Q' Changelog.md >> ./CHANGELOG
|
sed '/### /Q' Changelog.md >> ./CHANGELOG
|
||||||
echo '### ${{ steps.update_version.outputs.version }}' >> ./CHANGELOG
|
echo '### ${{ env.version }}' >> ./CHANGELOG
|
||||||
python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
|
python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
|
||||||
echo >> ./CHANGELOG
|
echo >> ./CHANGELOG
|
||||||
grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
|
grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
|
||||||
@@ -65,12 +210,16 @@ jobs:

     - name: Push to release
       id: push_release
-      if: ${{ !inputs.prerelease }}
+      env:
+        version: ${{ steps.setup_variables.outputs.version }}
+        target_repo: ${{ steps.setup_variables.outputs.target_repo }}
+      if: |
+        !inputs.prerelease && env.target_repo == github.repository
       run: |
         git config --global user.name github-actions
-        git config --global user.email github-actions@example.com
+        git config --global user.email github-actions@github.com
         git add -u
-        git commit -m "Release ${{ steps.update_version.outputs.version }}" \
+        git commit -m "Release ${{ env.version }}" \
           -m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all :ci run dl"
         git push origin --force ${{ github.event.ref }}:release
@@ -80,7 +229,10 @@ jobs:
         echo "head_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"

     - name: Update master
-      if: vars.PUSH_VERSION_COMMIT != '' && !inputs.prerelease
+      env:
+        target_repo: ${{ steps.setup_variables.outputs.target_repo }}
+      if: |
+        vars.PUSH_VERSION_COMMIT != '' && !inputs.prerelease && env.target_repo == github.repository
       run: git push origin ${{ github.event.ref }}

   build:
@@ -89,75 +241,159 @@ jobs:
     with:
       version: ${{ needs.prepare.outputs.version }}
       channel: ${{ needs.prepare.outputs.channel }}
+      origin: ${{ needs.prepare.outputs.target_repo }}
     permissions:
       contents: read
       packages: write # For package cache
     secrets:
       GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}

-  publish_pypi_homebrew:
+  publish_pypi:
     needs: [prepare, build]
+    if: ${{ needs.prepare.outputs.pypi_project }}
     runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: "3.10"

      - name: Install Requirements
        run: |
-          sudo apt-get -y install pandoc man
+          sudo apt -y install pandoc man
           python -m pip install -U pip setuptools wheel twine
           python -m pip install -U -r requirements.txt

      - name: Prepare
-        run: |
-          python devscripts/update-version.py ${{ needs.prepare.outputs.version }}
-          python devscripts/make_lazy_extractors.py
-
-      - name: Build and publish on PyPI
        env:
-          TWINE_USERNAME: __token__
-          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
-        if: env.TWINE_PASSWORD != '' && !inputs.prerelease
+          version: ${{ needs.prepare.outputs.version }}
+          suffix: ${{ needs.prepare.outputs.pypi_suffix }}
+          channel: ${{ needs.prepare.outputs.channel }}
+          target_repo: ${{ needs.prepare.outputs.target_repo }}
+          pypi_project: ${{ needs.prepare.outputs.pypi_project }}
+        run: |
+          python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
+          python devscripts/make_lazy_extractors.py
+          sed -i -E "s/(name=')[^']+(', # package name)/\1${{ env.pypi_project }}\2/" setup.py
+
+      - name: Build
        run: |
          rm -rf dist/*
          make pypi-files
          python devscripts/set-variant.py pip -M "You installed yt-dlp with pip or using the wheel from PyPi; Use that to update"
          python setup.py sdist bdist_wheel
+
+      - name: Publish to PyPI via token
+        env:
+          TWINE_USERNAME: __token__
+          TWINE_PASSWORD: ${{ secrets[needs.prepare.outputs.pypi_token] }}
+        if: |
+          needs.prepare.outputs.pypi_token != 'OIDC' && env.TWINE_PASSWORD
+        run: |
          twine upload dist/*

-      - name: Checkout Homebrew repository
-        env:
-          BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
-          PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
-        if: env.BREW_TOKEN != '' && env.PYPI_TOKEN != '' && !inputs.prerelease
-        uses: actions/checkout@v3
+      - name: Publish to PyPI via trusted publishing
+        if: |
+          needs.prepare.outputs.pypi_token == 'OIDC'
+        uses: pypa/gh-action-pypi-publish@release/v1
        with:
-          repository: yt-dlp/homebrew-taps
-          path: taps
-          ssh-key: ${{ secrets.BREW_TOKEN }}
+          verbose: true

-      - name: Update Homebrew Formulae
-        env:
-          BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
-          PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
-        if: env.BREW_TOKEN != '' && env.PYPI_TOKEN != '' && !inputs.prerelease
-        run: |
-          python devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ needs.prepare.outputs.version }}"
-          git -C taps/ config user.name github-actions
-          git -C taps/ config user.email github-actions@example.com
-          git -C taps/ commit -am 'yt-dlp: ${{ needs.prepare.outputs.version }}'
-          git -C taps/ push

   publish:
     needs: [prepare, build]
-    uses: ./.github/workflows/publish.yml
     permissions:
       contents: write
-    with:
-      channel: ${{ needs.prepare.outputs.channel }}
-      prerelease: ${{ inputs.prerelease }}
-      version: ${{ needs.prepare.outputs.version }}
-      target_commitish: ${{ needs.prepare.outputs.head_sha }}
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - uses: actions/download-artifact@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+
+      - name: Generate release notes
+        env:
+          head_sha: ${{ needs.prepare.outputs.head_sha }}
+          target_repo: ${{ needs.prepare.outputs.target_repo }}
+          target_tag: ${{ needs.prepare.outputs.target_tag }}
+        run: |
+          printf '%s' \
+            '[]' \
+            '(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
+            '[]' \
+            '(https://github.com/${{ github.repository }}' \
+            '${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
+            '[]' \
+            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
+            '[]' \
+            '(https://discord.gg/H5MNcFW63r "Discord") ' \
+            ${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
+            "[]" \
+            "(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
+            "[]" \
+            "(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
+          printf '\n\n' >> ./RELEASE_NOTES
+          cat >> ./RELEASE_NOTES << EOF
+          #### A description of the various files are in the [README](https://github.com/${{ github.repository }}#release-files)
+          ---
+          $(python ./devscripts/make_changelog.py -vv --collapsible)
+          EOF
+          printf '%s\n\n' '**This is a pre-release build**' >> ./PRERELEASE_NOTES
+          cat ./RELEASE_NOTES >> ./PRERELEASE_NOTES
+          printf '%s\n\n' 'Generated from: https://github.com/${{ github.repository }}/commit/${{ env.head_sha }}' >> ./ARCHIVE_NOTES
+          cat ./RELEASE_NOTES >> ./ARCHIVE_NOTES
+
+      - name: Publish to archive repo
+        env:
+          GH_TOKEN: ${{ secrets[needs.prepare.outputs.target_repo_token] }}
+          GH_REPO: ${{ needs.prepare.outputs.target_repo }}
+          version: ${{ needs.prepare.outputs.version }}
+          channel: ${{ needs.prepare.outputs.channel }}
+        if: |
+          inputs.prerelease && env.GH_TOKEN != '' && env.GH_REPO != '' && env.GH_REPO != github.repository
+        run: |
+          title="${{ startswith(env.GH_REPO, 'yt-dlp/') && 'yt-dlp ' || '' }}${{ env.channel }}"
+          gh release create \
+            --notes-file ARCHIVE_NOTES \
+            --title "${title} ${{ env.version }}" \
+            ${{ env.version }} \
+            artifact/*
+
+      - name: Prune old release
+        env:
+          GH_TOKEN: ${{ github.token }}
+          version: ${{ needs.prepare.outputs.version }}
+          target_repo: ${{ needs.prepare.outputs.target_repo }}
+          target_tag: ${{ needs.prepare.outputs.target_tag }}
+        if: |
+          env.target_repo == github.repository && env.target_tag != env.version
+        run: |
+          gh release delete --yes --cleanup-tag "${{ env.target_tag }}" || true
+          git tag --delete "${{ env.target_tag }}" || true
+          sleep 5 # Enough time to cover deletion race condition
+
+      - name: Publish release
+        env:
+          GH_TOKEN: ${{ github.token }}
+          version: ${{ needs.prepare.outputs.version }}
+          target_repo: ${{ needs.prepare.outputs.target_repo }}
+          target_tag: ${{ needs.prepare.outputs.target_tag }}
+          head_sha: ${{ needs.prepare.outputs.head_sha }}
+        if: |
+          env.target_repo == github.repository
+        run: |
+          title="${{ github.repository == 'yt-dlp/yt-dlp' && 'yt-dlp ' || '' }}"
+          title+="${{ env.target_tag != env.version && format('{0} ', env.target_tag) || '' }}"
+          gh release create \
+            --notes-file ${{ inputs.prerelease && 'PRERELEASE_NOTES' || 'RELEASE_NOTES' }} \
+            --target ${{ env.head_sha }} \
+            --title "${title}${{ env.version }}" \
+            ${{ inputs.prerelease && '--prerelease' || '' }} \
+            ${{ env.target_tag }} \
+            artifact/*
15 CONTRIBUTORS
@@ -509,3 +509,18 @@ handlerug
 jiru
 madewokherd
 xofe
+awalgarg
+midnightveil
+naginatana
+Riteo
+1100101
+aniolpages
+bartbroere
+CrendKing
+Esokrates
+HitomaruKonpaku
+LoserFox
+peci1
+saintliao
+shubhexists
+SirElderling
92 Changelog.md
@@ -4,6 +4,98 @@
 # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
 -->
+
+### 2023.11.14
+
+#### Important changes
+- **The release channels have been adjusted!**
+    * [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.
+    * [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes.
+- Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)
+    - Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers
+
+#### Core changes
+- [Add `--compat-option manifest-filesize-approx`](https://github.com/yt-dlp/yt-dlp/commit/10025b715ea01489557eb2c5a3cc04d361fcdb52) ([#8356](https://github.com/yt-dlp/yt-dlp/issues/8356)) by [bashonly](https://github.com/bashonly)
+- [Fix format sorting with `--load-info-json`](https://github.com/yt-dlp/yt-dlp/commit/595ea4a99b726b8fe9463e7853b7053978d0544e) ([#8521](https://github.com/yt-dlp/yt-dlp/issues/8521)) by [bashonly](https://github.com/bashonly)
+- [Include build origin in verbose output](https://github.com/yt-dlp/yt-dlp/commit/20314dd46f25e0e0a7e985a7804049aefa8b909f) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+- [Only ensure playlist thumbnail dir if writing thumbs](https://github.com/yt-dlp/yt-dlp/commit/a40e0b37dfc8c26916b0e01aa3f29f3bc42250b6) ([#8373](https://github.com/yt-dlp/yt-dlp/issues/8373)) by [bashonly](https://github.com/bashonly)
+- **update**: [Overhaul self-updater](https://github.com/yt-dlp/yt-dlp/commit/0b6ad22e6a432006a75df968f0283e6c6b3cfae6) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+
+#### Extractor changes
+- [Do not smuggle `http_headers`](https://github.com/yt-dlp/yt-dlp/commit/f04b5bedad7b281bee9814686bba1762bae092eb) by [coletdjnz](https://github.com/coletdjnz)
+- [Do not test truth value of `xml.etree.ElementTree.Element`](https://github.com/yt-dlp/yt-dlp/commit/d4f14a72dc1dd79396e0e80980268aee902b61e4) ([#8582](https://github.com/yt-dlp/yt-dlp/issues/8582)) by [bashonly](https://github.com/bashonly)
+- **brilliantpala**: [Fix cookies support](https://github.com/yt-dlp/yt-dlp/commit/9b5bedf13a3323074daceb0ec6ebb3cc6e0b9684) ([#8352](https://github.com/yt-dlp/yt-dlp/issues/8352)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **generic**: [Improve direct video link ext detection](https://github.com/yt-dlp/yt-dlp/commit/4ce2f29a50fcfb9920e6f2ffe42192945a2bad7e) ([#8340](https://github.com/yt-dlp/yt-dlp/issues/8340)) by [bashonly](https://github.com/bashonly)
+- **laxarxames**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/312a2d1e8bc247264f9d85c5ec764e33aa0133b5) ([#8412](https://github.com/yt-dlp/yt-dlp/issues/8412)) by [aniolpages](https://github.com/aniolpages)
+- **n-tv.de**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/8afd9468b0c822843bc480d366d1c86698daabfb) ([#8414](https://github.com/yt-dlp/yt-dlp/issues/8414)) by [1100101](https://github.com/1100101)
+- **neteasemusic**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/46acc418a53470b7f32581b3309c3cb87aa8488d) ([#8531](https://github.com/yt-dlp/yt-dlp/issues/8531)) by [LoserFox](https://github.com/LoserFox)
+- **nhk**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/54579be4364e148277c32e20a5c3efc2c3f52f5b) ([#8388](https://github.com/yt-dlp/yt-dlp/issues/8388)) by [garret1317](https://github.com/garret1317)
+- **novaembed**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/3ff494f6f41c27549420fa88be27555bd449ffdc) ([#8368](https://github.com/yt-dlp/yt-dlp/issues/8368)) by [peci1](https://github.com/peci1)
+- **npo**: [Send `POST` request to streams API endpoint](https://github.com/yt-dlp/yt-dlp/commit/8e02a4dcc800f9444e9d461edc41edd7b662f435) ([#8413](https://github.com/yt-dlp/yt-dlp/issues/8413)) by [bartbroere](https://github.com/bartbroere)
+- **ondemandkorea**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/05adfd883a4f2ecae0267e670a62a2e45c351aeb) ([#8386](https://github.com/yt-dlp/yt-dlp/issues/8386)) by [seproDev](https://github.com/seproDev)
+- **orf**: podcast: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/6ba3085616652cbf05d1858efc321fdbfc4c6119) ([#8486](https://github.com/yt-dlp/yt-dlp/issues/8486)) by [Esokrates](https://github.com/Esokrates)
+- **polskieradio**: audition: [Fix playlist extraction](https://github.com/yt-dlp/yt-dlp/commit/464327acdb353ceb91d2115163a5a9621b22fe0d) ([#8459](https://github.com/yt-dlp/yt-dlp/issues/8459)) by [shubhexists](https://github.com/shubhexists)
+- **qdance**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/177f0d963e4b9db749805c482e6f288354c8be84) ([#8426](https://github.com/yt-dlp/yt-dlp/issues/8426)) by [bashonly](https://github.com/bashonly)
+- **radiocomercial**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/ef12dbdcd3e7264bd3d744c1e3107597bd23ad35) ([#8508](https://github.com/yt-dlp/yt-dlp/issues/8508)) by [SirElderling](https://github.com/SirElderling)
+- **sbs.co.kr**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/25a4bd345a0dcfece6fef752d4537eb403da94d9) ([#8326](https://github.com/yt-dlp/yt-dlp/issues/8326)) by [seproDev](https://github.com/seproDev)
+- **theatercomplextown**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/2863fcf2b6876d0c7965ff7d6d9242eea653dc6b) ([#8560](https://github.com/yt-dlp/yt-dlp/issues/8560)) by [bashonly](https://github.com/bashonly)
+- **thisav**: [Remove](https://github.com/yt-dlp/yt-dlp/commit/cb480e390d85fb3a598c1b6d5eef3438ce729fc9) ([#8346](https://github.com/yt-dlp/yt-dlp/issues/8346)) by [bashonly](https://github.com/bashonly)
+- **thisoldhouse**: [Add login support](https://github.com/yt-dlp/yt-dlp/commit/c76c96677ff6a056f5844a568ef05ee22c46d6f4) ([#8561](https://github.com/yt-dlp/yt-dlp/issues/8561)) by [bashonly](https://github.com/bashonly)
+- **twitcasting**: [Fix livestream extraction](https://github.com/yt-dlp/yt-dlp/commit/7b8b1cf5eb8bf44ce70bc24e1f56f0dba2737e98) ([#8427](https://github.com/yt-dlp/yt-dlp/issues/8427)) by [JC-Chung](https://github.com/JC-Chung), [saintliao](https://github.com/saintliao)
+- **twitter**
+    - broadcast
+        - [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/7d337ca977d73a0a6c07ab481ed8faa8f6ff8726) ([#8383](https://github.com/yt-dlp/yt-dlp/issues/8383)) by [HitomaruKonpaku](https://github.com/HitomaruKonpaku)
+        - [Support `--wait-for-video`](https://github.com/yt-dlp/yt-dlp/commit/f6e97090d2ed9e05441ab0f4bec3559b816d7a00) ([#8475](https://github.com/yt-dlp/yt-dlp/issues/8475)) by [bashonly](https://github.com/bashonly)
+- **weibo**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/15b252dfd2c6807fe57afc5a95e59abadb32ccd2) ([#8463](https://github.com/yt-dlp/yt-dlp/issues/8463)) by [c-basalt](https://github.com/c-basalt)
+- **weverse**: [Fix login error handling](https://github.com/yt-dlp/yt-dlp/commit/4a601c9eff9fb42e24a4c8da3fa03628e035b35b) ([#8458](https://github.com/yt-dlp/yt-dlp/issues/8458)) by [seproDev](https://github.com/seproDev)
+- **youtube**: [Check newly uploaded iOS HLS formats](https://github.com/yt-dlp/yt-dlp/commit/ef79d20dc9d27ac002a7196f073b37f2f2721aed) ([#8336](https://github.com/yt-dlp/yt-dlp/issues/8336)) by [bashonly](https://github.com/bashonly)
+- **zoom**: [Extract combined view formats](https://github.com/yt-dlp/yt-dlp/commit/3906de07551fedb00b789345bf24cc27d6ddf128) ([#7847](https://github.com/yt-dlp/yt-dlp/issues/7847)) by [Mipsters](https://github.com/Mipsters)
+
+#### Downloader changes
+- **aria2c**: [Remove duplicate `--file-allocation=none`](https://github.com/yt-dlp/yt-dlp/commit/21b25281c51523620706b11bfc1c4a889858e1f2) ([#8332](https://github.com/yt-dlp/yt-dlp/issues/8332)) by [CrendKing](https://github.com/CrendKing)
+- **dash**: [Force native downloader for `--live-from-start`](https://github.com/yt-dlp/yt-dlp/commit/2622c804d1a5accc3045db398e0fc52074f4bdb3) ([#8339](https://github.com/yt-dlp/yt-dlp/issues/8339)) by [bashonly](https://github.com/bashonly)
+
+#### Networking changes
+- **Request Handler**: requests: [Add handler for `requests` HTTP library (#3668)](https://github.com/yt-dlp/yt-dlp/commit/8a8b54523addf46dfd50ef599761a81bc22362e6) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K) (With fixes in [4e38e2a](https://github.com/yt-dlp/yt-dlp/commit/4e38e2ae9d7380015349e6aee59c78bb3938befd))
+    Adds support for HTTPS proxies and persistent connections (keep-alive)
+
+#### Misc. changes
+- **build**
+    - [Include secretstorage in Linux builds](https://github.com/yt-dlp/yt-dlp/commit/9970d74c8383432c6c8779aa47d3253dcf412b14) by [bashonly](https://github.com/bashonly)
+    - [Overhaul and unify release workflow](https://github.com/yt-dlp/yt-dlp/commit/1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+- **ci**
+    - [Bump `actions/checkout` to v4](https://github.com/yt-dlp/yt-dlp/commit/5438593a35b7b042fc48fe29cad0b9039f07c9bb) by [bashonly](https://github.com/bashonly)
+    - [Run core tests with dependencies](https://github.com/yt-dlp/yt-dlp/commit/700444c23ddb65f618c2abd942acdc0c58c650b1) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz)
+- **cleanup**
+    - [Fix changelog typo](https://github.com/yt-dlp/yt-dlp/commit/a9d3f4b20a3533d2a40104c85bc2cc6c2564c800) by [bashonly](https://github.com/bashonly)
+    - [Update documentation for master and nightly channels](https://github.com/yt-dlp/yt-dlp/commit/a00af29853b8c7350ce086f4cab8c2c9cf2fcf1d) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+    - Miscellaneous: [b012271](https://github.com/yt-dlp/yt-dlp/commit/b012271d01b59759e4eefeab0308698cd9e7224c) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [dirkf](https://github.com/dirkf), [gamer191](https://github.com/gamer191), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
+- **test**: update: [Implement simple updater unit tests](https://github.com/yt-dlp/yt-dlp/commit/87264d4fdadcddd91289b968dd0e4bf58d449267) by [bashonly](https://github.com/bashonly)
+
+### 2023.10.13
+
+#### Core changes
+- [Ensure thumbnail output directory exists](https://github.com/yt-dlp/yt-dlp/commit/2acd1d555ef89851c73773776715d3de9a0e30b9) ([#7985](https://github.com/yt-dlp/yt-dlp/issues/7985)) by [Riteo](https://github.com/Riteo)
+- **utils**
+    - `js_to_json`: [Fix `Date` constructor parsing](https://github.com/yt-dlp/yt-dlp/commit/9d7ded6419089c1bf252496073f73ad90ed71004) ([#8295](https://github.com/yt-dlp/yt-dlp/issues/8295)) by [awalgarg](https://github.com/awalgarg), [Grub4K](https://github.com/Grub4K)
+    - `write_xattr`: [Use `os.setxattr` if available](https://github.com/yt-dlp/yt-dlp/commit/84e26038d4002e763ea51ca1bdce4f7e63c540bf) ([#8205](https://github.com/yt-dlp/yt-dlp/issues/8205)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+
+#### Extractor changes
+- **artetv**: [Support age-restricted content](https://github.com/yt-dlp/yt-dlp/commit/09f815ad52843219a7ee3f2a0dddf6c250c91f0c) ([#8301](https://github.com/yt-dlp/yt-dlp/issues/8301)) by [StefanLobbenmeier](https://github.com/StefanLobbenmeier)
+- **jtbc**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/b286ec68f1f28798b3e371f888a2ed97d399cf77) ([#8314](https://github.com/yt-dlp/yt-dlp/issues/8314)) by [seproDev](https://github.com/seproDev)
+- **mbn**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/e030b6b6fba7b2f4614ad2ab9f7649d40a2dd305) ([#8312](https://github.com/yt-dlp/yt-dlp/issues/8312)) by [seproDev](https://github.com/seproDev)
+- **nhk**: [Fix Japanese-language VOD extraction](https://github.com/yt-dlp/yt-dlp/commit/4de94b9e165bfd6421a692f5f2eabcdb08edcb71) ([#8309](https://github.com/yt-dlp/yt-dlp/issues/8309)) by [garret1317](https://github.com/garret1317)
+- **radiko**: [Fix bug with `downloader_options`](https://github.com/yt-dlp/yt-dlp/commit/b9316642313bbc9e209ac0d2276d37ba60bceb49) by [bashonly](https://github.com/bashonly)
+- **tenplay**: [Add support for seasons](https://github.com/yt-dlp/yt-dlp/commit/88a99c87b680ae59002534a517e191f46c42cbd4) ([#7939](https://github.com/yt-dlp/yt-dlp/issues/7939)) by [midnightveil](https://github.com/midnightveil)
+- **youku**: [Improve tudou.com support](https://github.com/yt-dlp/yt-dlp/commit/b7098d46b552a9322c6cea39ba80be5229f922de) ([#8160](https://github.com/yt-dlp/yt-dlp/issues/8160)) by [naginatana](https://github.com/naginatana)
+- **youtube**: [Fix bug with `--extractor-retries inf`](https://github.com/yt-dlp/yt-dlp/commit/feebf6d02fc9651331eee2af5e08e6112288163b) ([#8328](https://github.com/yt-dlp/yt-dlp/issues/8328)) by [Grub4K](https://github.com/Grub4K)
+
+#### Downloader changes
+- **fragment**: [Improve progress calculation](https://github.com/yt-dlp/yt-dlp/commit/1c51c520f7b511ebd9e4eb7322285a8c31eedbbd) ([#8241](https://github.com/yt-dlp/yt-dlp/issues/8241)) by [Grub4K](https://github.com/Grub4K)
+
+#### Misc. changes
+- **cleanup**: Miscellaneous: [b634ba7](https://github.com/yt-dlp/yt-dlp/commit/b634ba742d8f38ce9ecfa0546485728b0c6c59d1) by [bashonly](https://github.com/bashonly), [gamer191](https://github.com/gamer191)
+
 ### 2023.10.07

 #### Extractor changes
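The `requests` handler entry above is the headline networking change of this release: HTTPS (TLS-to-proxy) proxies and keep-alive connections now work out of the box. A minimal sketch of exercising it through yt-dlp's Python API (the proxy address is a placeholder, not from the diff):

```python
import yt_dlp

# HTTPS proxy URLs were previously unsupported for standard requests; with
# the new `requests` request handler they can be passed like any other proxy.
ydl_opts = {'proxy': 'https://127.0.0.1:3128'}  # placeholder proxy address
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(info['title'])
```

As the README changes below note, `--compat-options prefer-legacy-http-handler` restores the old `urllib` handler for standard HTTP requests if needed.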
44 README.md
@@ -89,7 +89,6 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
 * Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
 * Supports some (but not all) age-gated content without cookies
 * Download livestreams from the start using `--live-from-start` (*experimental*)
-* `255kbps` audio is extracted (if available) from YouTube Music when premium cookies are given
 * Channel URLs download all uploads of the channel, including shorts and live

 * **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
@@ -122,7 +121,7 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t

 * **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required

-* **Nightly builds**: [Automated nightly builds](#update-channels) can be used with `--update-to nightly`
+* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`

 See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes

@@ -158,14 +157,16 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
 * yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
 * yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [~~aria2c~~](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is
 * yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
+* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
+* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.

 For ease of use, a few more compat options are available:

 * `--compat-options all`: Use all compat options (Do NOT use)
-* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter`
-* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter`
+* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
+* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
 * `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
-* `--compat-options 2022`: Same as `--compat-options playlist-match-filter,no-external-downloader-progress`. Use this to enable all future compat options
+* `--compat-options 2022`: Same as `--compat-options playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`. Use this to enable all future compat options


 # INSTALLATION
@@ -192,9 +193,11 @@ For other third-party package managers, see [the wiki](https://github.com/yt-dlp

 <a id="update-channels"/>

-There are currently two release channels for binaries, `stable` and `nightly`.
-`stable` is the default channel, and many of its changes have been tested by users of the nightly channel.
-The `nightly` channel has releases built after each push to the master branch, and will have the most recent fixes and additions, but also have more risk of regressions. They are available in [their own repo](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases).
+There are currently three release channels for binaries: `stable`, `nightly` and `master`.
+* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
+* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
+* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).

 When using `--update`/`-U`, a release binary will only update to its current channel.
 `--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.
@@ -202,10 +205,19 @@ When using `--update`/`-U`, a release binary will only update to its current cha
 You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.

 Example usage:
-* `yt-dlp --update-to nightly` change to `nightly` channel and update to its latest release
-* `yt-dlp --update-to stable@2023.02.17` upgrade/downgrade to release to `stable` channel tag `2023.02.17`
-* `yt-dlp --update-to 2023.01.06` upgrade/downgrade to tag `2023.01.06` if it exists on the current channel
-* `yt-dlp --update-to example/yt-dlp@2023.03.01` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.03.01`
+* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
+* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
+* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
+* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`
+
+**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
+```
+# To update to nightly from stable executable/binary:
+yt-dlp --update-to nightly
+
+# To install nightly with pip:
+python -m pip install -U --pre yt-dlp
+```

 <!-- MANPAGE: BEGIN EXCLUDED SECTION -->
 ## RELEASE FILES
@@ -275,12 +287,13 @@ While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly
 * [**certifi**](https://github.com/certifi/python-certifi)\* - Provides Mozilla's root certificate bundle. Licensed under [MPLv2](https://github.com/certifi/python-certifi/blob/master/LICENSE)
 * [**brotli**](https://github.com/google/brotli)\* or [**brotlicffi**](https://github.com/python-hyper/brotlicffi) - [Brotli](https://en.wikipedia.org/wiki/Brotli) content encoding support. Both licensed under MIT <sup>[1](https://github.com/google/brotli/blob/master/LICENSE) [2](https://github.com/python-hyper/brotlicffi/blob/master/LICENSE) </sup>
 * [**websockets**](https://github.com/aaugustin/websockets)\* - For downloading over websocket. Licensed under [BSD-3-Clause](https://github.com/aaugustin/websockets/blob/main/LICENSE)
+* [**requests**](https://github.com/psf/requests)\* - HTTP library. For HTTPS proxy and persistent connections support. Licensed under [Apache-2.0](https://github.com/psf/requests/blob/main/LICENSE)

 ### Metadata

 * [**mutagen**](https://github.com/quodlibet/mutagen)\* - For `--embed-thumbnail` in certain formats. Licensed under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING)
 * [**AtomicParsley**](https://github.com/wez/atomicparsley) - For `--embed-thumbnail` in `mp4`/`m4a` files when `mutagen`/`ffmpeg` cannot. Licensed under [GPLv2+](https://github.com/wez/atomicparsley/blob/master/COPYING)
-* [**xattr**](https://github.com/xattr/xattr), [**pyxattr**](https://github.com/iustin/pyxattr) or [**setfattr**](http://savannah.nongnu.org/projects/attr) - For writing xattr metadata (`--xattr`) on **Linux**. Licensed under [MIT](https://github.com/xattr/xattr/blob/master/LICENSE.txt), [LGPL2.1](https://github.com/iustin/pyxattr/blob/master/COPYING) and [GPLv2+](http://git.savannah.nongnu.org/cgit/attr.git/tree/doc/COPYING) respectively
+* [**xattr**](https://github.com/xattr/xattr), [**pyxattr**](https://github.com/iustin/pyxattr) or [**setfattr**](http://savannah.nongnu.org/projects/attr) - For writing xattr metadata (`--xattr`) on **Mac** and **BSD**. Licensed under [MIT](https://github.com/xattr/xattr/blob/master/LICENSE.txt), [LGPL2.1](https://github.com/iustin/pyxattr/blob/master/COPYING) and [GPLv2+](http://git.savannah.nongnu.org/cgit/attr.git/tree/doc/COPYING) respectively

 ### Misc

@@ -367,7 +380,8 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
                                     CHANNEL can be a repository as well. CHANNEL
                                     and TAG default to "stable" and "latest"
                                     respectively if omitted; See "UPDATE" for
-                                    details. Supported channels: stable, nightly
+                                    details. Supported channels: stable,
+                                    nightly, master
     -i, --ignore-errors             Ignore download and postprocessing errors.
                                     The download will be considered successful
                                     even if the postprocessing fails
@@ -913,7 +927,7 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
                                     Defaults to ~/.netrc
     --netrc-cmd NETRC_CMD           Command to execute to get the credentials
                                     for an extractor.
-    --video-password PASSWORD       Video password (vimeo, youku)
+    --video-password PASSWORD       Video-specific password
     --ap-mso MSO                    Adobe Pass multiple-system operator (TV
                                     provider) identifier, use --ap-list-mso for
                                     a list of available MSOs
@@ -98,5 +98,21 @@
         "action": "add",
         "when": "61bdf15fc7400601c3da1aa7a43917310a5bf391",
         "short": "[priority] Security: [[CVE-2023-40581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-40581)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-42h4-v29r-42qg)\n - The shell escape function is now using `\"\"` instead of `\\\"`.\n - `utils.Popen` has been patched to properly quote commands."
+    },
+    {
+        "action": "change",
+        "when": "8a8b54523addf46dfd50ef599761a81bc22362e6",
+        "short": "[rh:requests] Add handler for `requests` HTTP library (#3668)\n\n\tAdds support for HTTPS proxies and persistent connections (keep-alive)",
+        "authors": ["bashonly", "coletdjnz", "Grub4K"]
+    },
+    {
+        "action": "add",
+        "when": "1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa",
+        "short": "[priority] **The release channels have been adjusted!**\n\t* [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.\n\t* [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes."
+    },
+    {
+        "action": "add",
+        "when": "f04b5bedad7b281bee9814686bba1762bae092eb",
+        "short": "[priority] Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)\n\t- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers"
     }
 ]
@@ -56,6 +56,7 @@ class CommitGroup(enum.Enum):
         },
         cls.MISC: {
             'build',
+            'ci',
             'cleanup',
             'devscripts',
             'docs',
@@ -12,7 +12,6 @@ import re
 from devscripts.utils import (
     get_filename_args,
     read_file,
-    read_version,
     write_file,
 )

@@ -35,19 +34,18 @@ VERBOSE_TMPL = '''
       description: |
         It should start like this:
       placeholder: |
-        [debug] Command-line config: ['-vU', 'test:youtube']
-        [debug] Portable config "yt-dlp.conf": ['-i']
+        [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version %(version)s [9d339c4] (win32_exe)
+        [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
-        [debug] Checking exe version: ffmpeg -bsfs
-        [debug] Checking exe version: ffprobe -bsfs
         [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
-        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: %(version)s, Current version: %(version)s
-        yt-dlp is up to date (%(version)s)
+        [debug] Request Handlers: urllib, requests
+        [debug] Loaded 1893 extractors
+        [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
+        yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
+        [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
         <more lines>
     render: shell
     validations:
@@ -66,7 +64,7 @@ NO_SKIP = '''


 def main():
-    fields = {'version': read_version(), 'no_skip': NO_SKIP}
+    fields = {'no_skip': NO_SKIP}
     fields['verbose'] = VERBOSE_TMPL % fields
     fields['verbose_optional'] = re.sub(r'(\n\s+validations:)?\n\s+required: true', '', fields['verbose'])
@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
-version can be either 0-aligned (yt-dlp version) or normalized (PyPi version)
-"""
-
-# Allow direct execution
-import os
-import sys
-
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-
-import json
-import re
-import urllib.request
-
-from devscripts.utils import read_file, write_file
-
-
-filename, version = sys.argv[1:]
-
-normalized_version = '.'.join(str(int(x)) for x in version.split('.'))
-
-pypi_release = json.loads(urllib.request.urlopen(
-    'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
-).read().decode())
-
-tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.tar.gz'))
-
-sha256sum = tarball_file['digests']['sha256']
-url = tarball_file['url']
-
-formulae_text = read_file(filename)
-
-formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text, count=1)
-formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text, count=1)
-
-write_file(filename, formulae_text)
@@ -20,7 +20,7 @@ def get_new_version(version, revision):
     version = datetime.now(timezone.utc).strftime('%Y.%m.%d')

     if revision:
-        assert revision.isdigit(), 'Revision must be a number'
+        assert revision.isdecimal(), 'Revision must be a number'
     else:
         old_version = read_version().split('.')
         if version.split('.') == old_version[:3]:
@@ -46,6 +46,10 @@ VARIANT = None
 UPDATE_HINT = None

 CHANNEL = {channel!r}
+
+ORIGIN = {origin!r}
+
+_pkg_version = {package_version!r}
 '''

 if __name__ == '__main__':
@@ -53,6 +57,12 @@ if __name__ == '__main__':
     parser.add_argument(
         '-c', '--channel', default='stable',
         help='Select update channel (default: %(default)s)')
+    parser.add_argument(
+        '-r', '--origin', default='local',
+        help='Select origin/repository (default: %(default)s)')
+    parser.add_argument(
+        '-s', '--suffix', default='',
+        help='Add an alphanumeric suffix to the package version, e.g. "dev"')
     parser.add_argument(
         '-o', '--output', default='yt_dlp/version.py',
         help='The output file to write to (default: %(default)s)')
@@ -66,6 +76,7 @@ if __name__ == '__main__':
         args.version if args.version and '.' in args.version
         else get_new_version(None, args.version))
     write_file(args.output, VERSION_TEMPLATE.format(
-        version=version, git_head=git_head, channel=args.channel))
+        version=version, git_head=git_head, channel=args.channel, origin=args.origin,
+        package_version=f'{version}{args.suffix}'))

     print(f'version={version} ({args.channel}), head={git_head}')
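The new `-r`/`-s` flags feed the added `ORIGIN` and `_pkg_version` template fields; the release workflow above invokes the script as `python devscripts/update-version.py -c "$channel" -r "$target_repo" -s "$suffix" "$version"`. A rough sketch of the fragment this would render into `yt_dlp/version.py` (values are illustrative, not from the diff):

```python
# Illustrative output of the updated VERSION_TEMPLATE for, e.g.:
#   python devscripts/update-version.py -c "stable" -r "yt-dlp/yt-dlp" "2023.11.14"
CHANNEL = 'stable'            # from --channel
ORIGIN = 'yt-dlp/yt-dlp'      # from --origin (new)
_pkg_version = '2023.11.14'   # version + --suffix (new); consumed by setup.py
```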
@@ -13,10 +13,11 @@ def write_file(fname, content, mode='w'):
         return f.write(content)


-def read_version(fname='yt_dlp/version.py'):
+def read_version(fname='yt_dlp/version.py', varname='__version__'):
     """Get the version without importing the package"""
-    exec(compile(read_file(fname), fname, 'exec'))
-    return locals()['__version__']
+    items = {}
+    exec(compile(read_file(fname), fname, 'exec'), items)
+    return items[varname]


 def get_filename_args(has_infile=False, default_outfile=None):
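The reworked helper can now pull any top-level name out of `version.py`, not just `__version__`. A minimal usage sketch (assuming a `version.py` generated by the updated template above):

```python
from devscripts.utils import read_version

version = read_version()                            # '__version__', as before
pkg_version = read_version(varname='_pkg_version')  # new: the PyPI package version
```

Passing an explicit dict as the globals of `exec` is also more reliable than reading back `locals()` inside a function, where writes made by `exec` are not guaranteed to be visible.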
@@ -1,6 +1,9 @@
 mutagen
 pycryptodomex
 websockets
-brotli; platform_python_implementation=='CPython'
-brotlicffi; platform_python_implementation!='CPython'
+brotli; implementation_name=='cpython'
+brotlicffi; implementation_name!='cpython'
 certifi
+requests>=2.31.0,<3
+urllib3>=1.26.17,<3
+secretstorage; sys_platform=='linux' and (implementation_name!='pypy' or implementation_version>='7.3.10')
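These requirement lines use PEP 508 environment markers, which pip evaluates against the running interpreter. A small sketch of how such a marker evaluates, using the `packaging` library (an assumption here; pip vendors its own copy of it):

```python
from packaging.markers import Marker

# implementation_name maps to sys.implementation.name, so it is 'cpython' on
# CPython and 'pypy' on PyPy -- which is why the markers above were switched
# from platform_python_implementation ('CPython'/'PyPy') to the lowercase form.
marker = Marker("implementation_name == 'cpython'")
print(marker.evaluate())  # True on CPython, False elsewhere
```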
13 setup.py
@@ -18,7 +18,7 @@ except ImportError:

 from devscripts.utils import read_file, read_version

-VERSION = read_version()
+VERSION = read_version(varname='_pkg_version')

 DESCRIPTION = 'A youtube-dl fork with additional features and patches'

@@ -62,7 +62,14 @@ def py2exe_params():
         'compressed': 1,
         'optimize': 2,
         'dist_dir': './dist',
-        'excludes': ['Crypto', 'Cryptodome'], # py2exe cannot import Crypto
+        'excludes': [
+            # py2exe cannot import Crypto
+            'Crypto',
+            'Cryptodome',
+            # py2exe appears to confuse this with our socks library.
+            # We don't use pysocks and urllib3.contrib.socks would fail to import if tried.
+            'urllib3.contrib.socks'
+        ],
         'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
         # Modules that are only imported dynamically must be added here
         'includes': ['yt_dlp.compat._legacy', 'yt_dlp.compat._deprecated',
@@ -135,7 +142,7 @@ def main():
     params = build_params()

     setup(
-        name='yt-dlp',
+        name='yt-dlp', # package name (do not change/remove comment)
         version=VERSION,
         maintainer='pukkandan',
         maintainer_email='pukkandan.ytdlp@gmail.com',
supportedsites.md
@@ -657,6 +657,8 @@
 - **Joj**
 - **Jove**
 - **JStream**
+- **JTBC**: jtbc.co.kr
+- **JTBC:program**
 - **JWPlatform**
 - **Kakao**
 - **Kaltura**
@@ -698,6 +700,7 @@
 - **LastFM**
 - **LastFMPlaylist**
 - **LastFMUser**
+- **LaXarxaMes**: [*laxarxames*](## "netrc machine")
 - **lbry**
 - **lbry:channel**
 - **lbry:playlist**
@@ -766,6 +769,7 @@
 - **massengeschmack.tv**
 - **Masters**
 - **MatchTV**
+- **MBN**: mbn.co.kr (매일방송)
 - **MDR**: MDR.DE and KiKA
 - **MedalTV**
 - **media.ccc.de**
@@ -1023,6 +1027,7 @@
 - **on24**: ON24
 - **OnDemandChinaEpisode**
 - **OnDemandKorea**
+- **OnDemandKoreaProgram**
 - **OneFootball**
 - **OnePlacePodcast**
 - **onet.pl**
@@ -1040,6 +1045,7 @@
 - **OraTV**
 - **orf:fm4:story**: fm4.orf.at stories
 - **orf:iptv**: iptv.ORF.at
+- **orf:podcast**
 - **orf:radio**
 - **orf:tvthek**: ORF TVthek
 - **OsnatelTV**: [*osnateltv*](## "netrc machine")
@@ -1177,6 +1183,8 @@
 - **radiobremen**
 - **radiocanada**
 - **radiocanada:audiovideo**
+- **RadioComercial**
+- **RadioComercialPlaylist**
 - **radiofrance**
 - **RadioFranceLive**
 - **RadioFrancePodcast**
@@ -1303,6 +1311,9 @@
 - **Sapo**: SAPO Vídeos
 - **savefrom.net**
 - **SBS**: sbs.com.au
+- **sbs.co.kr**
+- **sbs.co.kr:allvod_program**
+- **sbs.co.kr:programs_vod**
 - **schooltv**
 - **ScienceChannel**
 - **screen.yahoo:search**: Yahoo screen search; "yvsearch:" prefix
@@ -1468,8 +1479,11 @@
 - **Tempo**
 - **TennisTV**: [*tennistv*](## "netrc machine")
 - **TenPlay**: [*10play*](## "netrc machine")
+- **TenPlaySeason**
 - **TF1**
 - **TFO**
+- **theatercomplextown:ppv**: [*theatercomplextown*](## "netrc machine")
+- **theatercomplextown:vod**: [*theatercomplextown*](## "netrc machine")
 - **TheHoleTv**
 - **TheIntercept**
 - **ThePlatform**
@@ -1478,8 +1492,7 @@
 - **TheSun**
 - **TheWeatherChannel**
 - **ThisAmericanLife**
-- **ThisAV**
-- **ThisOldHouse**
+- **ThisOldHouse**: [*thisoldhouse*](## "netrc machine")
 - **ThisVid**
 - **ThisVidMember**
 - **ThisVidPlaylist**
test/test_networking.py
@@ -28,7 +28,7 @@ from http.cookiejar import CookieJar
 
 from test.helper import FakeYDL, http_server_port
 from yt_dlp.cookies import YoutubeDLCookieJar
-from yt_dlp.dependencies import brotli
+from yt_dlp.dependencies import brotli, requests, urllib3
 from yt_dlp.networking import (
     HEADRequest,
     PUTRequest,
@@ -43,6 +43,7 @@ from yt_dlp.networking.exceptions import (
     HTTPError,
     IncompleteRead,
     NoSupportingHandlers,
+    ProxyError,
     RequestError,
     SSLError,
     TransportError,
@@ -305,7 +306,7 @@ class TestRequestHandlerBase:
 
 
 class TestHTTPRequestHandler(TestRequestHandlerBase):
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_verify_cert(self, handler):
         with handler() as rh:
             with pytest.raises(CertificateVerifyError):
@@ -316,7 +317,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert r.status == 200
             r.close()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_ssl_error(self, handler):
         # HTTPS server with too old TLS version
         # XXX: is there a better way to test this than to create a new server?
@@ -334,7 +335,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
                 validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))
             assert not issubclass(exc_info.type, CertificateVerifyError)
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_percent_encode(self, handler):
         with handler() as rh:
             # Unicode characters should be encoded with uppercase percent-encoding
@@ -346,7 +347,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res.status == 200
             res.close()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_remove_dot_segments(self, handler):
         with handler() as rh:
             # This isn't a comprehensive test,
@@ -361,14 +362,14 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res.url == f'http://127.0.0.1:{self.http_port}/headers'
             res.close()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_unicode_path_redirection(self, handler):
         with handler() as rh:
             r = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/302-non-ascii-redirect'))
             assert r.url == f'http://127.0.0.1:{self.http_port}/%E4%B8%AD%E6%96%87.html'
             r.close()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_raise_http_error(self, handler):
         with handler() as rh:
             for bad_status in (400, 500, 599, 302):
@@ -378,7 +379,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             # Should not raise an error
             validate_and_send(rh, Request('http://127.0.0.1:%d/gen_200' % self.http_port)).close()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_response_url(self, handler):
         with handler() as rh:
             # Response url should be that of the last url in redirect chain
@@ -389,7 +390,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res2.url == f'http://127.0.0.1:{self.http_port}/gen_200'
             res2.close()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_redirect(self, handler):
         with handler() as rh:
             def do_req(redirect_status, method, assert_no_content=False):
@@ -444,7 +445,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
                 with pytest.raises(HTTPError):
                     do_req(code, 'GET')
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_request_cookie_header(self, handler):
         # We should accept a Cookie header being passed as in normal headers and handle it appropriately.
         with handler() as rh:
@@ -476,19 +477,19 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert b'Cookie: test=ytdlp' not in data
             assert b'Cookie: test=test' in data
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_redirect_loop(self, handler):
         with handler() as rh:
             with pytest.raises(HTTPError, match='redirect loop'):
                 validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_loop'))
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_incompleteread(self, handler):
         with handler(timeout=2) as rh:
             with pytest.raises(IncompleteRead):
                 validate_and_send(rh, Request('http://127.0.0.1:%d/incompleteread' % self.http_port)).read()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_cookies(self, handler):
         cookiejar = YoutubeDLCookieJar()
         cookiejar.set_cookie(http.cookiejar.Cookie(
@@ -505,7 +506,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
                 rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={'cookiejar': cookiejar})).read()
             assert b'Cookie: test=ytdlp' in data
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_headers(self, handler):
 
         with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
@@ -521,7 +522,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert b'Test2: test2' not in data
             assert b'Test3: test3' in data
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_timeout(self, handler):
         with handler() as rh:
             # Default timeout is 20 seconds, so this should go through
@@ -537,7 +538,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             validate_and_send(
                 rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1', extensions={'timeout': 4}))
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_source_address(self, handler):
         source_address = f'127.0.0.{random.randint(5, 255)}'
         with handler(source_address=source_address) as rh:
@@ -545,13 +546,13 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
                 rh, Request(f'http://127.0.0.1:{self.http_port}/source_address')).read().decode()
             assert source_address == data
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_gzip_trailing_garbage(self, handler):
         with handler() as rh:
             data = validate_and_send(rh, Request(f'http://localhost:{self.http_port}/trailing_garbage')).read().decode()
             assert data == '<html><video src="/vid.mp4" /></html>'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     @pytest.mark.skipif(not brotli, reason='brotli support is not installed')
     def test_brotli(self, handler):
         with handler() as rh:
@@ -562,7 +563,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res.headers.get('Content-Encoding') == 'br'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_deflate(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -572,7 +573,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res.headers.get('Content-Encoding') == 'deflate'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_gzip(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -582,7 +583,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res.headers.get('Content-Encoding') == 'gzip'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
    def test_multiple_encodings(self, handler):
        with handler() as rh:
            for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
@@ -593,7 +594,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
                assert res.headers.get('Content-Encoding') == pair
                assert res.read() == b'<html><video src="/vid.mp4" /></html>'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_unsupported_encoding(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -603,7 +604,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
             assert res.headers.get('Content-Encoding') == 'unsupported'
             assert res.read() == b'raw'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_read(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -633,7 +634,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
         cls.geo_proxy_thread.daemon = True
         cls.geo_proxy_thread.start()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_http_proxy(self, handler):
         http_proxy = f'http://127.0.0.1:{self.proxy_port}'
         geo_proxy = f'http://127.0.0.1:{self.geo_port}'
@@ -659,7 +660,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
             assert res != f'normal: {real_url}'
             assert 'Accept' in res
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_noproxy(self, handler):
         with handler(proxies={'proxy': f'http://127.0.0.1:{self.proxy_port}'}) as rh:
             # NO_PROXY
@@ -669,7 +670,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
                 'utf-8')
             assert 'Accept' in nop_response
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_allproxy(self, handler):
         url = 'http://foo.com/bar'
         with handler() as rh:
@@ -677,7 +678,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
                 'utf-8')
             assert response == f'normal: {url}'
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_http_proxy_with_idn(self, handler):
         with handler(proxies={
             'http': f'http://127.0.0.1:{self.proxy_port}',
@@ -715,27 +716,27 @@ class TestClientCertificate:
         ) as rh:
             validate_and_send(rh, Request(f'https://127.0.0.1:{self.port}/video.html')).read().decode()
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_certificate_combined_nopass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'clientwithkey.crt'),
         })
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_certificate_nocombined_nopass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'client.crt'),
             'client_certificate_key': os.path.join(self.certdir, 'client.key'),
         })
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_certificate_combined_pass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'clientwithencryptedkey.crt'),
             'client_certificate_password': 'foobar',
         })
 
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_certificate_nocombined_pass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'client.crt'),
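Every change above follows a single pattern: the handler fixture is parametrized indirectly, so each test now runs once against the Urllib handler and once against the new Requests handler. The mechanism in isolation (a toy fixture, not the project's actual conftest):

import pytest

@pytest.fixture
def handler(request):
    # With indirect=True, the parameter arrives via request.param and the
    # fixture maps it to whatever object the tests should receive.
    return {'Urllib': 'UrllibRH', 'Requests': 'RequestsRH'}[request.param]

@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
def test_example(handler):
    assert handler in ('UrllibRH', 'RequestsRH')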
@@ -819,6 +820,75 @@ class TestUrllibRequestHandler(TestRequestHandlerBase):
         assert not isinstance(exc_info.value, TransportError)
 
 
+class TestRequestsRequestHandler(TestRequestHandlerBase):
+    @pytest.mark.parametrize('raised,expected', [
+        (lambda: requests.exceptions.ConnectTimeout(), TransportError),
+        (lambda: requests.exceptions.ReadTimeout(), TransportError),
+        (lambda: requests.exceptions.Timeout(), TransportError),
+        (lambda: requests.exceptions.ConnectionError(), TransportError),
+        (lambda: requests.exceptions.ProxyError(), ProxyError),
+        (lambda: requests.exceptions.SSLError('12[CERTIFICATE_VERIFY_FAILED]34'), CertificateVerifyError),
+        (lambda: requests.exceptions.SSLError(), SSLError),
+        (lambda: requests.exceptions.InvalidURL(), RequestError),
+        (lambda: requests.exceptions.InvalidHeader(), RequestError),
+        # catch-all: https://github.com/psf/requests/blob/main/src/requests/adapters.py#L535
+        (lambda: urllib3.exceptions.HTTPError(), TransportError),
+        (lambda: requests.exceptions.RequestException(), RequestError)
+        # (lambda: requests.exceptions.TooManyRedirects(), HTTPError) - Needs a response object
+    ])
+    @pytest.mark.parametrize('handler', ['Requests'], indirect=True)
+    def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
+        with handler() as rh:
+            def mock_get_instance(*args, **kwargs):
+                class MockSession:
+                    def request(self, *args, **kwargs):
+                        raise raised()
+                return MockSession()
+
+            monkeypatch.setattr(rh, '_get_instance', mock_get_instance)
+
+            with pytest.raises(expected) as exc_info:
+                rh.send(Request('http://fake'))
+
+            assert exc_info.type is expected
+
+    @pytest.mark.parametrize('raised,expected,match', [
+        (lambda: urllib3.exceptions.SSLError(), SSLError, None),
+        (lambda: urllib3.exceptions.TimeoutError(), TransportError, None),
+        (lambda: urllib3.exceptions.ReadTimeoutError(None, None, None), TransportError, None),
+        (lambda: urllib3.exceptions.ProtocolError(), TransportError, None),
+        (lambda: urllib3.exceptions.DecodeError(), TransportError, None),
+        (lambda: urllib3.exceptions.HTTPError(), TransportError, None),  # catch-all
+        (
+            lambda: urllib3.exceptions.ProtocolError('error', http.client.IncompleteRead(partial=b'abc', expected=4)),
+            IncompleteRead,
+            '3 bytes read, 4 more expected'
+        ),
+        (
+            lambda: urllib3.exceptions.ProtocolError('error', urllib3.exceptions.IncompleteRead(partial=3, expected=5)),
+            IncompleteRead,
+            '3 bytes read, 5 more expected'
+        ),
+    ])
+    @pytest.mark.parametrize('handler', ['Requests'], indirect=True)
+    def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
+        from urllib3.response import HTTPResponse as Urllib3Response
+        from requests.models import Response as RequestsResponse
+        from yt_dlp.networking._requests import RequestsResponseAdapter
+        requests_res = RequestsResponse()
+        requests_res.raw = Urllib3Response(body=b'', status=200)
+        res = RequestsResponseAdapter(requests_res)
+
+        def mock_read(*args, **kwargs):
+            raise raised()
+        monkeypatch.setattr(res.fp, 'read', mock_read)
+
+        with pytest.raises(expected, match=match) as exc_info:
+            res.read()
+
+        assert exc_info.type is expected
+
+
 def run_validation(handler, error, req, **handler_kwargs):
     with handler(**handler_kwargs) as rh:
         if error:
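These tests pin down how exceptions leaking from requests/urllib3 must surface as yt-dlp's own networking errors. A condensed sketch of that mapping idea (a hypothetical helper, not the actual code in yt_dlp/networking/_requests.py):

import requests.exceptions
import urllib3.exceptions

from yt_dlp.networking.exceptions import (
    CertificateVerifyError, ProxyError, RequestError, SSLError, TransportError)


def translate_request_error(exc):
    # Most specific classes first; requests' ProxyError and SSLError both
    # subclass ConnectionError, so order matters.
    if isinstance(exc, requests.exceptions.ProxyError):
        return ProxyError(cause=exc)
    if isinstance(exc, requests.exceptions.SSLError):
        if 'CERTIFICATE_VERIFY_FAILED' in str(exc):
            return CertificateVerifyError(cause=exc)
        return SSLError(cause=exc)
    if isinstance(exc, (requests.exceptions.ConnectionError, requests.exceptions.Timeout,
                        urllib3.exceptions.HTTPError)):  # urllib3.HTTPError is the catch-all
        return TransportError(cause=exc)
    return RequestError(cause=exc)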
@@ -855,6 +925,10 @@ class TestRequestHandlerValidation:
             ('file', UnsupportedRequest, {}),
             ('file', False, {'enable_file_urls': True}),
         ]),
+        ('Requests', [
+            ('http', False, {}),
+            ('https', False, {}),
+        ]),
         (NoCheckRH, [('http', False, {})]),
         (ValidationRH, [('http', UnsupportedRequest, {})])
     ]
@@ -870,6 +944,14 @@ class TestRequestHandlerValidation:
             ('socks5h', False),
             ('socks', UnsupportedRequest),
         ]),
+        ('Requests', [
+            ('http', False),
+            ('https', False),
+            ('socks4', False),
+            ('socks4a', False),
+            ('socks5', False),
+            ('socks5h', False),
+        ]),
         (NoCheckRH, [('http', False)]),
         (HTTPSupportedRH, [('http', UnsupportedRequest)]),
     ]
@@ -880,6 +962,10 @@ class TestRequestHandlerValidation:
             ('all', False),
             ('unrelated', False),
         ]),
+        ('Requests', [
+            ('all', False),
+            ('unrelated', False),
+        ]),
         (NoCheckRH, [('all', False)]),
         (HTTPSupportedRH, [('all', UnsupportedRequest)]),
         (HTTPSupportedRH, [('no', UnsupportedRequest)]),
@@ -894,6 +980,13 @@ class TestRequestHandlerValidation:
             ({'timeout': 'notatimeout'}, AssertionError),
             ({'unsupported': 'value'}, UnsupportedRequest),
         ]),
+        ('Requests', [
+            ({'cookiejar': 'notacookiejar'}, AssertionError),
+            ({'cookiejar': YoutubeDLCookieJar()}, False),
+            ({'timeout': 1}, False),
+            ({'timeout': 'notatimeout'}, AssertionError),
+            ({'unsupported': 'value'}, UnsupportedRequest),
+        ]),
         (NoCheckRH, [
             ({'cookiejar': 'notacookiejar'}, False),
             ({'somerandom': 'test'}, False),  # but any extension is allowed through
@@ -909,7 +1002,7 @@ class TestRequestHandlerValidation:
     def test_url_scheme(self, handler, scheme, fail, handler_kwargs):
         run_validation(handler, fail, Request(f'{scheme}://'), **(handler_kwargs or {}))
 
-    @pytest.mark.parametrize('handler,fail', [('Urllib', False)], indirect=['handler'])
+    @pytest.mark.parametrize('handler,fail', [('Urllib', False), ('Requests', False)], indirect=['handler'])
     def test_no_proxy(self, handler, fail):
         run_validation(handler, fail, Request('http://', proxies={'no': '127.0.0.1,github.com'}))
         run_validation(handler, fail, Request('http://'), proxies={'no': '127.0.0.1,github.com'})
@@ -932,13 +1025,13 @@ class TestRequestHandlerValidation:
         run_validation(handler, fail, Request('http://', proxies={'http': f'{scheme}://example.com'}))
         run_validation(handler, fail, Request('http://'), proxies={'http': f'{scheme}://example.com'})
 
-    @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests'], indirect=True)
     def test_empty_proxy(self, handler):
         run_validation(handler, False, Request('http://', proxies={'http': None}))
         run_validation(handler, False, Request('http://'), proxies={'http': None})
 
     @pytest.mark.parametrize('proxy_url', ['//example.com', 'example.com', '127.0.0.1', '/a/b/c'])
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_invalid_proxy_url(self, handler, proxy_url):
         run_validation(handler, UnsupportedRequest, Request('http://', proxies={'http': proxy_url}))
 
@@ -1200,6 +1293,10 @@ class TestYoutubeDLNetworking:
             assert 'Youtubedl-no-compression' not in rh.headers
             assert rh.headers.get('Accept-Encoding') == 'identity'
 
+        with FakeYDL({'http_headers': {'Ytdl-socks-proxy': 'socks://localhost:1080'}}) as ydl:
+            rh = self.build_handler(ydl)
+            assert 'Ytdl-socks-proxy' not in rh.headers
+
     def test_build_handler_params(self):
         with FakeYDL({
             'http_headers': {'test': 'testtest'},
@@ -1242,6 +1339,13 @@ class TestYoutubeDLNetworking:
         rh = self.build_handler(ydl, UrllibRH)
         assert rh.enable_file_urls is True
 
+    def test_compat_opt_prefer_urllib(self):
+        # This assumes urllib only has a preference when this compat opt is given
+        with FakeYDL({'compat_opts': ['prefer-legacy-http-handler']}) as ydl:
+            director = ydl.build_request_director([UrllibRH])
+            assert len(director.preferences) == 1
+            assert director.preferences.pop()(UrllibRH, None)
+
 
 class TestRequest:
 
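The new test_compat_opt_prefer_urllib exercises the request-director preference mechanism: a preference is a callable (handler, request) -> score whose result biases handler selection. A minimal illustration of what such a preference can look like (illustrative, not the exact compat-option implementation):

def prefer_urllib(handler, request):
    # A positive score steers the director towards the urllib handler.
    return 100 if handler.RH_KEY == 'Urllib' else 0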
test/test_socks.py
@@ -263,7 +263,7 @@ def ctx(request):
 
 
 class TestSocks4Proxy:
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks4_no_auth(self, handler, ctx):
         with handler() as rh:
             with ctx.socks_server(Socks4ProxyHandler) as server_address:
@@ -271,7 +271,7 @@ class TestSocks4Proxy:
                 rh, proxies={'all': f'socks4://{server_address}'})
             assert response['version'] == 4
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks4_auth(self, handler, ctx):
         with handler() as rh:
             with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address:
@@ -281,7 +281,7 @@ class TestSocks4Proxy:
                 rh, proxies={'all': f'socks4://user:@{server_address}'})
             assert response['version'] == 4
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks4a_ipv4_target(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
@@ -289,7 +289,7 @@ class TestSocks4Proxy:
                 assert response['version'] == 4
                 assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1')
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks4a_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
@@ -298,7 +298,7 @@ class TestSocks4Proxy:
                 assert response['ipv4_address'] is None
                 assert response['domain_address'] == 'localhost'
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_ipv4_client_source_address(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             source_address = f'127.0.0.{random.randint(5, 255)}'
@@ -308,7 +308,7 @@ class TestSocks4Proxy:
             assert response['client_address'][0] == source_address
             assert response['version'] == 4
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     @pytest.mark.parametrize('reply_code', [
         Socks4CD.REQUEST_REJECTED_OR_FAILED,
         Socks4CD.REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD,
@@ -320,7 +320,7 @@ class TestSocks4Proxy:
             with pytest.raises(ProxyError):
                 ctx.socks_info_request(rh)
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_ipv6_socks4_proxy(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler, bind_ip='::1') as server_address:
             with handler(proxies={'all': f'socks4://{server_address}'}) as rh:
@@ -329,7 +329,7 @@ class TestSocks4Proxy:
                 assert response['ipv4_address'] == '127.0.0.1'
                 assert response['version'] == 4
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_timeout(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler, sleep=2) as server_address:
             with handler(proxies={'all': f'socks4://{server_address}'}, timeout=0.5) as rh:
@@ -339,7 +339,7 @@ class TestSocks4Proxy:
 
 class TestSocks5Proxy:
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5_no_auth(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -347,7 +347,7 @@ class TestSocks5Proxy:
             assert response['auth_methods'] == [0x0]
             assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5_user_pass(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, auth=('test', 'testpass')) as server_address:
             with handler() as rh:
@@ -360,7 +360,7 @@ class TestSocks5Proxy:
             assert response['auth_methods'] == [Socks5Auth.AUTH_NONE, Socks5Auth.AUTH_USER_PASS]
             assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5_ipv4_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -368,7 +368,7 @@ class TestSocks5Proxy:
                 assert response['ipv4_address'] == '127.0.0.1'
                 assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -376,7 +376,7 @@ class TestSocks5Proxy:
                 assert (response['ipv4_address'] == '127.0.0.1') != (response['ipv6_address'] == '::1')
                 assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5h_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@@ -385,7 +385,7 @@ class TestSocks5Proxy:
                 assert response['domain_address'] == 'localhost'
                 assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5h_ip_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@@ -394,7 +394,7 @@ class TestSocks5Proxy:
                 assert response['domain_address'] is None
                 assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_socks5_ipv6_destination(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -402,7 +402,7 @@ class TestSocks5Proxy:
             assert response['ipv6_address'] == '::1'
             assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_ipv6_socks5_proxy(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, bind_ip='::1') as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -413,7 +413,7 @@ class TestSocks5Proxy:
 
     # XXX: is there any feasible way of testing IPv6 source addresses?
     # Same would go for non-proxy source_address test...
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     def test_ipv4_client_source_address(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             source_address = f'127.0.0.{random.randint(5, 255)}'
@@ -422,7 +422,7 @@ class TestSocks5Proxy:
             assert response['client_address'][0] == source_address
             assert response['version'] == 5
 
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http')], indirect=True)
+    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http')], indirect=True)
     @pytest.mark.parametrize('reply_code', [
         Socks5Reply.GENERAL_FAILURE,
         Socks5Reply.CONNECTION_NOT_ALLOWED,
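The SOCKS matrix above now runs every scheme against both handlers. At the user level, the same proxy URLs are what the proxy option consumes; for example (placeholder address):

from yt_dlp import YoutubeDL

# socks5h:// resolves DNS on the proxy side; socks5:// resolves locally
opts = {'proxy': 'socks5h://127.0.0.1:1080'}
with YoutubeDL(opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)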
199
test/test_update.py
Normal file
@@ -0,0 +1,199 @@
+#!/usr/bin/env python3
+
+# Allow direct execution
+import os
+import sys
+import unittest
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from test.helper import FakeYDL, report_warning
+from yt_dlp.update import Updater, UpdateInfo
+
+TEST_API_DATA = {
+    'yt-dlp/yt-dlp/latest': {
+        'tag_name': '2023.12.31',
+        'target_commitish': 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',
+        'name': 'yt-dlp 2023.12.31',
+        'body': 'BODY',
+    },
+    'yt-dlp/yt-dlp-nightly-builds/latest': {
+        'tag_name': '2023.12.31.123456',
+        'target_commitish': 'master',
+        'name': 'yt-dlp nightly 2023.12.31.123456',
+        'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/cccccccccccccccccccccccccccccccccccccccc',
+    },
+    'yt-dlp/yt-dlp-master-builds/latest': {
+        'tag_name': '2023.12.31.987654',
+        'target_commitish': 'master',
+        'name': 'yt-dlp master 2023.12.31.987654',
+        'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/dddddddddddddddddddddddddddddddddddddddd',
+    },
+    'yt-dlp/yt-dlp/tags/testing': {
+        'tag_name': 'testing',
+        'target_commitish': '9999999999999999999999999999999999999999',
+        'name': 'testing',
+        'body': 'BODY',
+    },
+    'fork/yt-dlp/latest': {
+        'tag_name': '2050.12.31',
+        'target_commitish': 'eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
+        'name': '2050.12.31',
+        'body': 'BODY',
+    },
+    'fork/yt-dlp/tags/pr0000': {
+        'tag_name': 'pr0000',
+        'target_commitish': 'ffffffffffffffffffffffffffffffffffffffff',
+        'name': 'pr1234 2023.11.11.000000',
+        'body': 'BODY',
+    },
+    'fork/yt-dlp/tags/pr1234': {
+        'tag_name': 'pr1234',
+        'target_commitish': '0000000000000000000000000000000000000000',
+        'name': 'pr1234 2023.12.31.555555',
+        'body': 'BODY',
+    },
+    'fork/yt-dlp/tags/pr9999': {
+        'tag_name': 'pr9999',
+        'target_commitish': '1111111111111111111111111111111111111111',
+        'name': 'pr9999',
+        'body': 'BODY',
+    },
+    'fork/yt-dlp-satellite/tags/pr987': {
+        'tag_name': 'pr987',
+        'target_commitish': 'master',
+        'name': 'pr987',
+        'body': 'Generated from: https://github.com/yt-dlp/yt-dlp/commit/2222222222222222222222222222222222222222',
+    },
+}
+
+TEST_LOCKFILE_V1 = '''# This file is used for regulating self-update
+lock 2022.08.18.36 .+ Python 3.6
+lock 2023.11.13 .+ Python 3.7
+'''
+
+TEST_LOCKFILE_V2 = '''# This file is used for regulating self-update
+lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3.6
+lockV2 yt-dlp/yt-dlp 2023.11.13 .+ Python 3.7
+'''
+
+TEST_LOCKFILE_V1_V2 = '''# This file is used for regulating self-update
+lock 2022.08.18.36 .+ Python 3.6
+lock 2023.11.13 .+ Python 3.7
+lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3.6
+lockV2 yt-dlp/yt-dlp 2023.11.13 .+ Python 3.7
+lockV2 fork/yt-dlp pr0000 .+ Python 3.6
+lockV2 fork/yt-dlp pr1234 .+ Python 3.7
+lockV2 fork/yt-dlp pr9999 .+ Python 3.11
+'''
+
+
+class FakeUpdater(Updater):
+    current_version = '2022.01.01'
+    current_commit = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
+
+    _channel = 'stable'
+    _origin = 'yt-dlp/yt-dlp'
+
+    def _download_update_spec(self, *args, **kwargs):
+        return TEST_LOCKFILE_V1_V2
+
+    def _call_api(self, tag):
+        tag = f'tags/{tag}' if tag != 'latest' else tag
+        return TEST_API_DATA[f'{self.requested_repo}/{tag}']
+
+    def _report_error(self, msg, *args, **kwargs):
+        report_warning(msg)
+
+
+class TestUpdate(unittest.TestCase):
+    maxDiff = None
+
+    def test_update_spec(self):
+        ydl = FakeYDL()
+        updater = FakeUpdater(ydl, 'stable@latest')
+
+        def test(lockfile, identifier, input_tag, expect_tag, exact=False, repo='yt-dlp/yt-dlp'):
+            updater._identifier = identifier
+            updater._exact = exact
+            updater.requested_repo = repo
+            result = updater._process_update_spec(lockfile, input_tag)
+            self.assertEqual(
+                result, expect_tag,
+                f'{identifier!r} requesting {repo}@{input_tag} (exact={exact}) '
+                f'returned {result!r} instead of {expect_tag!r}')
+
+        test(TEST_LOCKFILE_V1, 'zip Python 3.11.0', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V1, 'zip stable Python 3.11.0', '2023.11.13', '2023.11.13', exact=True)
+        test(TEST_LOCKFILE_V1, 'zip Python 3.6.0', '2023.11.13', '2022.08.18.36')
+        test(TEST_LOCKFILE_V1, 'zip stable Python 3.6.0', '2023.11.13', None, exact=True)
+        test(TEST_LOCKFILE_V1, 'zip Python 3.7.0', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V1, 'zip stable Python 3.7.1', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V1, 'zip Python 3.7.1', '2023.12.31', '2023.11.13')
+        test(TEST_LOCKFILE_V1, 'zip stable Python 3.7.1', '2023.12.31', '2023.11.13')
+
+        test(TEST_LOCKFILE_V2, 'zip Python 3.11.1', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V2, 'zip stable Python 3.11.1', '2023.12.31', '2023.12.31')
+        test(TEST_LOCKFILE_V2, 'zip Python 3.6.1', '2023.11.13', '2022.08.18.36')
+        test(TEST_LOCKFILE_V2, 'zip stable Python 3.7.2', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V2, 'zip Python 3.7.2', '2023.12.31', '2023.11.13')
+
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.11.2', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.11.2', '2023.12.31', '2023.12.31')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.6.2', '2023.11.13', '2022.08.18.36')
+        test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.7.3', '2023.11.13', '2023.11.13')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.7.3', '2023.12.31', '2023.11.13')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.7.4', 'pr0000', 'pr0000', repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.6.4', 'pr0000', None, repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.7.4', 'pr1234', None, repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.8.1', 'pr1234', 'pr1234', repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.7.5', 'pr1234', None, repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.11.3', 'pr9999', None, repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.12.0', 'pr9999', 'pr9999', repo='fork/yt-dlp')
+        test(TEST_LOCKFILE_V1_V2, 'zip Python 3.11.4', 'pr9999', None, repo='fork/yt-dlp')
+
+    def test_query_update(self):
+        ydl = FakeYDL()
+
+        def test(target, expected, current_version=None, current_commit=None, identifier=None):
+            updater = FakeUpdater(ydl, target)
+            if current_version:
+                updater.current_version = current_version
+            if current_commit:
+                updater.current_commit = current_commit
+            updater._identifier = identifier or 'zip'
+            update_info = updater.query_update(_output=True)
+            self.assertDictEqual(
+                update_info.__dict__ if update_info else {}, expected.__dict__ if expected else {})
+
+        test('yt-dlp/yt-dlp@latest', UpdateInfo(
+            '2023.12.31', version='2023.12.31', requested_version='2023.12.31', commit='b' * 40))
+        test('yt-dlp/yt-dlp-nightly-builds@latest', UpdateInfo(
+            '2023.12.31.123456', version='2023.12.31.123456', requested_version='2023.12.31.123456', commit='c' * 40))
+        test('yt-dlp/yt-dlp-master-builds@latest', UpdateInfo(
+            '2023.12.31.987654', version='2023.12.31.987654', requested_version='2023.12.31.987654', commit='d' * 40))
+        test('fork/yt-dlp@latest', UpdateInfo(
+            '2050.12.31', version='2050.12.31', requested_version='2050.12.31', commit='e' * 40))
+        test('fork/yt-dlp@pr0000', UpdateInfo(
+            'pr0000', version='2023.11.11.000000', requested_version='2023.11.11.000000', commit='f' * 40))
+        test('fork/yt-dlp@pr1234', UpdateInfo(
+            'pr1234', version='2023.12.31.555555', requested_version='2023.12.31.555555', commit='0' * 40))
+        test('fork/yt-dlp@pr9999', UpdateInfo(
+            'pr9999', version=None, requested_version=None, commit='1' * 40))
+        test('fork/yt-dlp-satellite@pr987', UpdateInfo(
+            'pr987', version=None, requested_version=None, commit='2' * 40))
+        test('yt-dlp/yt-dlp', None, current_version='2024.01.01')
test('stable', UpdateInfo(
|
||||||
|
'2023.12.31', version='2023.12.31', requested_version='2023.12.31', commit='b' * 40))
|
||||||
|
test('nightly', UpdateInfo(
|
||||||
|
'2023.12.31.123456', version='2023.12.31.123456', requested_version='2023.12.31.123456', commit='c' * 40))
|
||||||
|
test('master', UpdateInfo(
|
||||||
|
'2023.12.31.987654', version='2023.12.31.987654', requested_version='2023.12.31.987654', commit='d' * 40))
|
||||||
|
test('testing', None, current_commit='9' * 40)
|
||||||
|
test('testing', UpdateInfo('testing', commit='9' * 40))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
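The `lock`/`lockV2` lines exercised by `test_update_spec` pair a repository and a maximum version (or tag) with a regex that is matched against the variant identifier. A minimal parsing sketch, illustrative only and not yt-dlp's actual `_process_update_spec` logic:

    import re

    def parse_lock_line(line):
        # 'lockV2 <repo> <tag> <identifier-regex>' or legacy 'lock <tag> <identifier-regex>'
        if line.startswith('lockV2 '):
            _, repo, tag, pattern = line.split(' ', 3)
        elif line.startswith('lock '):
            repo = 'yt-dlp/yt-dlp'
            _, tag, pattern = line.split(' ', 2)
        else:
            return None
        return repo, tag, pattern

    repo, tag, pattern = parse_lock_line('lockV2 yt-dlp/yt-dlp 2023.11.13 .+ Python 3.7')
    assert re.match(pattern, 'zip stable Python 3.7.1')       # affected variant
    assert not re.match(pattern, 'zip stable Python 3.11.0')  # unaffected variant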
@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-
-# Allow direct execution
-import os
-import sys
-import unittest
-
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-
-import json
-
-from yt_dlp.update import rsa_verify
-
-
-class TestUpdate(unittest.TestCase):
-    def test_rsa_verify(self):
-        UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
-        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json'), 'rb') as f:
-            versions_info = f.read().decode()
-        versions_info = json.loads(versions_info)
-        signature = versions_info['signature']
-        del versions_info['signature']
-        self.assertTrue(rsa_verify(
-            json.dumps(versions_info, sort_keys=True).encode(),
-            signature, UPDATES_RSA_KEY))
-
-
-if __name__ == '__main__':
-    unittest.main()
@@ -1209,6 +1209,9 @@ class TestUtil(unittest.TestCase):
         on = js_to_json('\'"\\""\'')
         self.assertEqual(json.loads(on), '"""', msg='Unnecessary quote escape should be escaped')

+        on = js_to_json('[new Date("spam"), \'("eggs")\']')
+        self.assertEqual(json.loads(on), ['spam', '("eggs")'], msg='Date regex should match a single string')
+
     def test_js_to_json_malformed(self):
         self.assertEqual(js_to_json('42a1'), '42"a1"')
         self.assertEqual(js_to_json('42a-1'), '42"a"-1')
@@ -1220,11 +1223,13 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(js_to_json('`${name}"${name}"`', {'name': '5'}), '"5\\"5\\""')
         self.assertEqual(js_to_json('`${name}`', {}), '"name"')

-    def test_js_to_json_map_array_constructors(self):
+    def test_js_to_json_common_constructors(self):
         self.assertEqual(json.loads(js_to_json('new Map([["a", 5]])')), {'a': 5})
         self.assertEqual(json.loads(js_to_json('Array(5, 10)')), [5, 10])
         self.assertEqual(json.loads(js_to_json('new Array(15,5)')), [15, 5])
         self.assertEqual(json.loads(js_to_json('new Map([Array(5, 10),new Array(15,5)])')), {'5': 10, '15': 5})
+        self.assertEqual(json.loads(js_to_json('new Date("123")')), "123")
+        self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), "2023-10-19")

     def test_extract_attributes(self):
         self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
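The new assertions above rely on `js_to_json()` rewriting common JS constructors into plain JSON values, e.g. `new Date("x")` collapsing to its single string argument. A toy version of just that rule, purely illustrative and far simpler than the real implementation:

    import json
    import re

    def toy_js_to_json(code):
        # new Date("x") / new Date('x') -> "x" (keep the single string argument)
        return re.sub(r'new Date\((["\'])(.*?)\1\)', r'"\2"', code)

    assert json.loads(toy_js_to_json('new Date("2023-10-19")')) == '2023-10-19'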
@@ -1,34 +0,0 @@
-{
-    "latest": "2013.01.06",
-    "signature": "72158cdba391628569ffdbea259afbcf279bbe3d8aeb7492690735dc1cfa6afa754f55c61196f3871d429599ab22f2667f1fec98865527b32632e7f4b3675a7ef0f0fbe084d359256ae4bba68f0d33854e531a70754712f244be71d4b92e664302aa99653ee4df19800d955b6c4149cd2b3f24288d6e4b40b16126e01f4c8ce6",
-    "versions": {
-        "2013.01.02": {
-            "bin": [
-                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl",
-                "f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b"
-            ],
-            "exe": [
-                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe",
-                "75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422"
-            ],
-            "tar": [
-                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz",
-                "6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196"
-            ]
-        },
-        "2013.01.06": {
-            "bin": [
-                "http://youtube-dl.org/downloads/2013.01.06/youtube-dl",
-                "64b6ed8865735c6302e836d4d832577321b4519aa02640dc508580c1ee824049"
-            ],
-            "exe": [
-                "http://youtube-dl.org/downloads/2013.01.06/youtube-dl.exe",
-                "58609baf91e4389d36e3ba586e21dab882daaaee537e4448b1265392ae86ff84"
-            ],
-            "tar": [
-                "http://youtube-dl.org/downloads/2013.01.06/youtube-dl-2013.01.06.tar.gz",
-                "fe77ab20a95d980ed17a659aa67e371fdd4d656d19c4c7950e7b720b0c2f1a86"
-            ]
-        }
-    }
-}
@@ -60,7 +60,7 @@ from .postprocessor import (
     get_postprocessor,
 )
 from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
-from .update import REPOSITORY, _get_system_deprecation, current_git_head, detect_variant
+from .update import REPOSITORY, _get_system_deprecation, _make_label, current_git_head, detect_variant
 from .utils import (
     DEFAULT_OUTTMPL,
     IDENTITY,
@@ -158,7 +158,7 @@ from .utils.networking import (
     clean_proxies,
     std_headers,
 )
-from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__
+from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__

 if compat_os_name == 'nt':
     import ctypes
@@ -2338,7 +2338,7 @@ class YoutubeDL:
             return

         for f in formats:
-            if f.get('has_drm'):
+            if f.get('has_drm') or f.get('__needs_testing'):
                 yield from self._check_formats([f])
             else:
                 yield f
@@ -2764,7 +2764,8 @@ class YoutubeDL:
             format['dynamic_range'] = 'SDR'
         if format.get('aspect_ratio') is None:
             format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
-        if (not format.get('manifest_url')  # For fragmented formats, "tbr" is often max bitrate and not average
+        # For fragmented formats, "tbr" is often max bitrate and not average
+        if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
                 and info_dict.get('duration') and format.get('tbr')
                 and not format.get('filesize') and not format.get('filesize_approx')):
             format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
@@ -3543,14 +3544,14 @@ class YoutubeDL:
             'version': __version__,
             'current_git_head': current_git_head(),
             'release_git_head': RELEASE_GIT_HEAD,
-            'repository': REPOSITORY,
+            'repository': ORIGIN,
         })

         if remove_private_keys:
             reject = lambda k, v: v is None or k.startswith('__') or k in {
                 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                 'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
-                'playlist_autonumber', '_format_sort_fields',
+                'playlist_autonumber',
             }
         else:
             reject = lambda k, v: False
@@ -3926,8 +3927,8 @@ class YoutubeDL:
             source += '*'
         klass = type(self)
         write_debug(join_nonempty(
-            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
-            f'{CHANNEL}@{__version__}',
+            f'{REPOSITORY.rpartition("/")[2]} version',
+            _make_label(ORIGIN, CHANNEL.partition('@')[2] or __version__, __version__),
             f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
             '' if source == 'unknown' else f'({source})',
             '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
@@ -3968,7 +3969,7 @@ class YoutubeDL:
         })) or 'none'))

         write_debug(f'Proxy map: {self.proxies}')
-        # write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
+        write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
         for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
             display_list = ['%s%s' % (
                 klass.__name__, '' if klass.__name__ == name else f' as {name}')
@@ -4057,6 +4058,9 @@ class YoutubeDL:
                     raise RequestError(
                         'file:// URLs are disabled by default in yt-dlp for security reasons. '
                         'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
+                if 'unsupported proxy type: "https"' in ue.msg.lower():
+                    raise RequestError(
+                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
                 raise
         except SSLError as e:
             if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
@@ -4099,6 +4103,8 @@ class YoutubeDL:
             }),
         ))
         director.preferences.update(preferences or [])
+        if 'prefer-legacy-http-handler' in self.params['compat_opts']:
+            director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
         return director

     def encode(self, s):
@@ -4221,7 +4227,7 @@ class YoutubeDL:
         return ret

     def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
-        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
+        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
         write_all = self.params.get('write_all_thumbnails', False)
         thumbnails, ret = [], []
         if write_all or self.params.get('writethumbnail', False):
@@ -4237,6 +4243,9 @@ class YoutubeDL:
             self.write_debug(f'Skipping writing {label} thumbnail')
             return ret

+        if thumbnails and not self._ensure_dir_exists(filename):
+            return None
+
         for idx, t in list(enumerate(thumbnails))[::-1]:
             thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
             thumb_display_id = f'{label} thumbnail {t["id"]}'
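The `filesize_approx` expression above is plain arithmetic: `tbr` is in KBit/s, so duration (seconds) times `tbr` times 1024/8 yields bytes. For a 60-second stream at 1000 KBit/s:

    duration, tbr = 60, 1000
    assert int(duration * tbr * (1024 / 8)) == 7_680_000  # about 7.3 MiB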
@@ -21,9 +21,11 @@ def get_hidden_imports():
     yield from ('yt_dlp.compat._legacy', 'yt_dlp.compat._deprecated')
     yield from ('yt_dlp.utils._legacy', 'yt_dlp.utils._deprecated')
     yield pycryptodome_module()
-    yield from collect_submodules('websockets')
+    # Only `websockets` is required, others are collected just in case
+    for module in ('websockets', 'requests', 'urllib3'):
+        yield from collect_submodules(module)
     # These are auto-detected, but explicitly add them just in case
-    yield from ('mutagen', 'brotli', 'certifi')
+    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage')


 hiddenimports = list(get_hidden_imports())
@@ -58,6 +58,15 @@ except (ImportError, SyntaxError):
     # See https://github.com/yt-dlp/yt-dlp/issues/2633
     websockets = None

+try:
+    import urllib3
+except ImportError:
+    urllib3 = None
+
+try:
+    import requests
+except ImportError:
+    requests = None

 try:
     import xattr  # xattr or pyxattr
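With `urllib3` and `requests` imported this way, the corresponding attributes of `yt_dlp.dependencies` are either the module or `None`, so callers can feature-gate without their own try/except. An illustrative check, not taken from the codebase:

    from yt_dlp import dependencies

    if dependencies.requests is None:  # attribute is None when the import failed
        print('requests is unavailable, so features that need it cannot be used')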
@@ -15,12 +15,15 @@ class DashSegmentsFD(FragmentFD):
     FD_NAME = 'dashsegments'

     def real_download(self, filename, info_dict):
-        if info_dict.get('is_live') and set(info_dict['protocol'].split('+')) != {'http_dash_segments_generator'}:
-            self.report_error('Live DASH videos are not supported')
+        if 'http_dash_segments_generator' in info_dict['protocol'].split('+'):
+            real_downloader = None  # No external FD can support --live-from-start
+        else:
+            if info_dict.get('is_live'):
+                self.report_error('Live DASH videos are not supported')
+            real_downloader = get_suitable_downloader(
+                info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

         real_start = time.time()
-        real_downloader = get_suitable_downloader(
-            info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

         requested_formats = [{**info_dict, **fmt} for fmt in info_dict.get('requested_formats', [])]
         args = []
@@ -335,7 +335,7 @@ class Aria2cFD(ExternalFD):
         cmd += ['--auto-file-renaming=false']

         if 'fragments' in info_dict:
-            cmd += ['--file-allocation=none', '--uri-selector=inorder']
+            cmd += ['--uri-selector=inorder']
             url_list_file = '%s.frag.urls' % tmpfilename
             url_list = []
             for frag_index, fragment in enumerate(info_dict['fragments']):
@@ -14,6 +14,7 @@ from ..networking import Request
 from ..networking.exceptions import HTTPError, IncompleteRead
 from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj
 from ..utils.networking import HTTPHeaderDict
+from ..utils.progress import ProgressCalculator


 class HttpQuietDownloader(HttpFD):
@@ -226,8 +227,7 @@ class FragmentFD(FileDownloader):
         resume_len = ctx['complete_frags_downloaded_bytes']
         total_frags = ctx['total_frags']
         ctx_id = ctx.get('ctx_id')
-        # This dict stores the download progress, it's updated by the progress
-        # hook
+        # Stores the download progress, updated by the progress hook
         state = {
             'status': 'downloading',
             'downloaded_bytes': resume_len,
@@ -237,14 +237,8 @@ class FragmentFD(FileDownloader):
             'tmpfilename': ctx['tmpfilename'],
         }

-        start = time.time()
-        ctx.update({
-            'started': start,
-            'fragment_started': start,
-            # Amount of fragment's bytes downloaded by the time of the previous
-            # frag progress hook invocation
-            'prev_frag_downloaded_bytes': 0,
-        })
+        ctx['started'] = time.time()
+        progress = ProgressCalculator(resume_len)

         def frag_progress_hook(s):
             if s['status'] not in ('downloading', 'finished'):
@@ -259,38 +253,35 @@ class FragmentFD(FileDownloader):
             state['max_progress'] = ctx.get('max_progress')
             state['progress_idx'] = ctx.get('progress_idx')

-            time_now = time.time()
-            state['elapsed'] = time_now - start
+            state['elapsed'] = progress.elapsed
             frag_total_bytes = s.get('total_bytes') or 0
             s['fragment_info_dict'] = s.pop('info_dict', {})
+
+            # XXX: Fragment resume is not accounted for here
             if not ctx['live']:
                 estimated_size = (
                     (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
                     / (state['fragment_index'] + 1) * total_frags)
-                state['total_bytes_estimate'] = estimated_size
+                progress.total = estimated_size
+                progress.update(s.get('downloaded_bytes'))
+                state['total_bytes_estimate'] = progress.total
+            else:
+                progress.update(s.get('downloaded_bytes'))

             if s['status'] == 'finished':
                 state['fragment_index'] += 1
                 ctx['fragment_index'] = state['fragment_index']
-                state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
-                ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
-                ctx['speed'] = state['speed'] = self.calc_speed(
-                    ctx['fragment_started'], time_now, frag_total_bytes)
-                ctx['fragment_started'] = time.time()
-                ctx['prev_frag_downloaded_bytes'] = 0
-            else:
-                frag_downloaded_bytes = s['downloaded_bytes']
-                state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
-                ctx['speed'] = state['speed'] = self.calc_speed(
-                    ctx['fragment_started'], time_now, frag_downloaded_bytes - ctx.get('frag_resume_len', 0))
-                if not ctx['live']:
-                    state['eta'] = self.calc_eta(state['speed'], estimated_size - state['downloaded_bytes'])
-                ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
+                progress.thread_reset()
+
+            state['downloaded_bytes'] = ctx['complete_frags_downloaded_bytes'] = progress.downloaded
+            state['speed'] = ctx['speed'] = progress.speed.smooth
+            state['eta'] = progress.eta.smooth
+
             self._hook_progress(state, info_dict)

         ctx['dl'].add_progress_hook(frag_progress_hook)

-        return start
+        return ctx['started']

     def _finish_frag_download(self, ctx, info_dict):
         ctx['dest_stream'].close()
@@ -500,7 +491,6 @@ class FragmentFD(FileDownloader):
                 download_fragment(fragment, ctx_copy)
                 return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized')

-        self.report_warning('The download speed shown is only of one thread. This is a known issue')
         with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
             try:
                 for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments):
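`ProgressCalculator` (from `yt_dlp.utils.progress`) replaces the hand-rolled speed/ETA bookkeeping deleted above. A minimal sketch of the core idea, an exponentially-weighted moving average that damps jitter in rate samples; this is illustrative, not the real class:

    class SmoothedRate:
        # EWMA of a rate such as bytes/second; alpha=0.3 is an assumed value
        def __init__(self, alpha=0.3):
            self.alpha = alpha
            self.smooth = None

        def add(self, sample):
            self.smooth = sample if self.smooth is None else (
                self.alpha * sample + (1 - self.alpha) * self.smooth)
            return self.smooth

    rate = SmoothedRate()
    for sample in (1000, 4000, 2000):
        rate.add(sample)
    assert 1000 < rate.smooth < 4000  # spikes in the samples are damped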
@@ -896,6 +896,10 @@ from .jeuxvideo import JeuxVideoIE
 from .jove import JoveIE
 from .joj import JojIE
 from .jstream import JStreamIE
+from .jtbc import (
+    JTBCIE,
+    JTBCProgramIE,
+)
 from .jwplatform import JWPlatformIE
 from .kakao import KakaoIE
 from .kaltura import KalturaIE
@@ -949,6 +953,7 @@ from .lastfm import (
     LastFMPlaylistIE,
     LastFMUserIE,
 )
+from .laxarxames import LaXarxaMesIE
 from .lbry import (
     LBRYIE,
     LBRYChannelIE,
@@ -1053,6 +1058,7 @@ from .markiza import (
 from .massengeschmacktv import MassengeschmackTVIE
 from .masters import MastersIE
 from .matchtv import MatchTVIE
+from .mbn import MBNIE
 from .mdr import MDRIE
 from .medaltv import MedalTVIE
 from .mediaite import MediaiteIE
@@ -1382,7 +1388,10 @@ from .oftv import (
 from .oktoberfesttv import OktoberfestTVIE
 from .olympics import OlympicsReplayIE
 from .on24 import On24IE
-from .ondemandkorea import OnDemandKoreaIE
+from .ondemandkorea import (
+    OnDemandKoreaIE,
+    OnDemandKoreaProgramIE,
+)
 from .onefootball import OneFootballIE
 from .onenewsnz import OneNewsNZIE
 from .oneplace import OnePlacePodcastIE
@@ -1411,6 +1420,7 @@ from .orf import (
     ORFTVthekIE,
     ORFFM4StoryIE,
     ORFRadioIE,
+    ORFPodcastIE,
     ORFIPTVIE,
 )
 from .outsidetv import OutsideTVIE
@@ -1573,6 +1583,10 @@ from .radiocanada import (
     RadioCanadaIE,
     RadioCanadaAudioVideoIE,
 )
+from .radiocomercial import (
+    RadioComercialIE,
+    RadioComercialPlaylistIE,
+)
 from .radiode import RadioDeIE
 from .radiojavan import RadioJavanIE
 from .radiobremen import RadioBremenIE
@@ -1753,6 +1767,11 @@ from .samplefocus import SampleFocusIE
 from .sapo import SapoIE
 from .savefrom import SaveFromIE
 from .sbs import SBSIE
+from .sbscokr import (
+    SBSCoKrIE,
+    SBSCoKrAllvodProgramIE,
+    SBSCoKrProgramsVodIE,
+)
 from .screen9 import Screen9IE
 from .screencast import ScreencastIE
 from .screencastify import ScreencastifyIE
@@ -1897,6 +1916,8 @@ from .srmediathek import SRMediathekIE
 from .stacommu import (
     StacommuLiveIE,
     StacommuVODIE,
+    TheaterComplexTownVODIE,
+    TheaterComplexTownPPVIE,
 )
 from .stanfordoc import StanfordOpenClassroomIE
 from .startv import StarTVIE
@@ -1992,7 +2013,10 @@ from .tencent import (
     WeTvSeriesIE,
 )
 from .tennistv import TennisTVIE
-from .tenplay import TenPlayIE
+from .tenplay import (
+    TenPlayIE,
+    TenPlaySeasonIE,
+)
 from .testurl import TestURLIE
 from .tf1 import TF1IE
 from .tfo import TFOIE
@@ -2006,7 +2030,6 @@ from .thestar import TheStarIE
 from .thesun import TheSunIE
 from .theweatherchannel import TheWeatherChannelIE
 from .thisamericanlife import ThisAmericanLifeIE
-from .thisav import ThisAVIE
 from .thisoldhouse import ThisOldHouseIE
 from .thisvid import (
     ThisVidIE,
@@ -48,17 +48,7 @@ class ArteTVIE(ArteTVBaseIE):
     }, {
         'note': 'No alt_title',
         'url': 'https://www.arte.tv/fr/videos/110371-000-A/la-chaleur-supplice-des-arbres-de-rue/',
-        'info_dict': {
-            'id': '110371-000-A',
-            'ext': 'mp4',
-            'upload_date': '20220718',
-            'duration': 154,
-            'timestamp': 1658162460,
-            'description': 'md5:5890f36fe7dccfadb8b7c0891de54786',
-            'title': 'La chaleur, supplice des arbres de rue',
-            'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/CPE2sQDtD8GLQgt8DuYHLf/940x530',
-        },
-        'params': {'skip_download': 'm3u8'}
+        'only_matching': True,
     }, {
         'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
         'only_matching': True,
@@ -67,19 +57,20 @@ class ArteTVIE(ArteTVBaseIE):
         'only_matching': True,
     }, {
         'url': 'https://www.arte.tv/de/videos/110203-006-A/zaz/',
+        'only_matching': True,
+    }, {
+        'note': 'age-restricted',
+        'url': 'https://www.arte.tv/de/videos/006785-000-A/the-element-of-crime/',
         'info_dict': {
-            'id': '110203-006-A',
-            'chapters': 'count:16',
-            'description': 'md5:cf592f1df52fe52007e3f8eac813c084',
-            'alt_title': 'Zaz',
-            'title': 'Baloise Session 2022',
-            'timestamp': 1668445200,
-            'duration': 4054,
-            'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/ubQjmVCGyRx3hmBuZEK9QZ/940x530',
-            'upload_date': '20221114',
+            'id': '006785-000-A',
+            'description': 'md5:c2f94fdfefc8a280e4dab68ab96ab0ba',
+            'title': 'The Element of Crime',
+            'timestamp': 1696111200,
+            'duration': 5849,
+            'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/q82dTTfyuCXupPsGxXsd7B/940x530',
+            'upload_date': '20230930',
             'ext': 'mp4',
-        },
-        'expected_warnings': ['geo restricted']
+        }
     }]

     _GEO_BYPASS = True
@@ -136,7 +127,9 @@ class ArteTVIE(ArteTVBaseIE):
         lang = mobj.group('lang') or mobj.group('lang_2')
         langauge_code = self._LANG_MAP.get(lang)

-        config = self._download_json(f'{self._API_BASE}/config/{lang}/{video_id}', video_id)
+        config = self._download_json(f'{self._API_BASE}/config/{lang}/{video_id}', video_id, headers={
+            'x-validated-age': '18'
+        })

         geoblocking = traverse_obj(config, ('data', 'attributes', 'restriction', 'geoblocking')) or {}
         if geoblocking.get('restrictedArea'):
@@ -31,7 +31,7 @@ class BanByeBaseIE(InfoExtractor):


 class BanByeIE(BanByeBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?banbye.com/(?:en/)?watch/(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://(?:www\.)?banbye\.com/(?:en/)?watch/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://banbye.com/watch/v_ytfmvkVYLE8T',
         'md5': '2f4ea15c5ca259a73d909b2cfd558eb5',
@@ -120,7 +120,7 @@ class BanByeIE(BanByeBaseIE):


 class BanByeChannelIE(BanByeBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?banbye.com/(?:en/)?channel/(?P<id>\w+)'
+    _VALID_URL = r'https?://(?:www\.)?banbye\.com/(?:en/)?channel/(?P<id>\w+)'
     _TESTS = [{
         'url': 'https://banbye.com/channel/ch_wrealu24',
         'info_dict': {
@@ -2,7 +2,7 @@ from .common import InfoExtractor


 class BreitBartIE(InfoExtractor):
-    _VALID_URL = r'https?:\/\/(?:www\.)breitbart.com/videos/v/(?P<id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?breitbart\.com/videos/v/(?P<id>[^/?#]+)'
     _TESTS = [{
         'url': 'https://www.breitbart.com/videos/v/5cOz1yup/?pl=Ij6NDOji',
         'md5': '0aa6d1d6e183ac5ca09207fe49f17ade',
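The recurring `_VALID_URL` fix in this batch: an unescaped `.` matches any character, so `breitbart.com` would also match a host like `breitbartxcom`. A quick demonstration:

    import re

    assert re.match(r'https?://(?:www\.)breitbart.com/', 'https://www.breitbartxcom/')
    assert not re.match(r'https?://(?:www\.)?breitbart\.com/', 'https://www.breitbartxcom/')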
@@ -21,10 +21,10 @@ class BrilliantpalaBaseIE(InfoExtractor):

     def _get_logged_in_username(self, url, video_id):
         webpage, urlh = self._download_webpage_handle(url, video_id)
-        if self._LOGIN_API == urlh.url:
+        if urlh.url.startswith(self._LOGIN_API):
             self.raise_login_required()
         return self._html_search_regex(
-            r'"username"\s*:\s*"(?P<username>[^"]+)"', webpage, 'stream page info', 'username')
+            r'"username"\s*:\s*"(?P<username>[^"]+)"', webpage, 'logged-in username')

     def _perform_login(self, username, password):
         login_form = self._hidden_inputs(self._download_webpage(
@@ -1,8 +1,9 @@
-import re
-import json
 import base64
+import json
+import re
 import time
 import urllib.parse
+import xml.etree.ElementTree

 from .common import InfoExtractor
 from ..compat import (
@@ -387,7 +388,7 @@ class CBCGemIE(InfoExtractor):
         url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)

         secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
-        if not secret_xml:
+        if not isinstance(secret_xml, xml.etree.ElementTree.Element):
             return

         for child in secret_xml:
@@ -2225,7 +2225,9 @@ class InfoExtractor:
             mpd_url, video_id,
             note='Downloading MPD VOD manifest' if note is None else note,
             errnote='Failed to download VOD manifest' if errnote is None else errnote,
-            fatal=False, data=data, headers=headers, query=query) or {}
+            fatal=False, data=data, headers=headers, query=query)
+        if not isinstance(mpd_doc, xml.etree.ElementTree.Element):
+            return None
         return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))

     @staticmethod
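Both `isinstance` checks above guard the same pitfall: an `xml.etree.ElementTree.Element` with no children is falsy, so `if not doc` would treat a valid but empty document as a failed download:

    import xml.etree.ElementTree as ET

    empty = ET.fromstring('<MPD/>')
    assert isinstance(empty, ET.Element)  # a perfectly valid document...
    assert not empty                      # ...whose truth value is still False (no children)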
@@ -10,7 +10,7 @@ from ..utils import (


 class CraftsyIE(InfoExtractor):
-    _VALID_URL = r'https?://www.craftsy.com/class/(?P<id>[a-z0-9_-]+)/'
+    _VALID_URL = r'https?://www\.craftsy\.com/class/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://www.craftsy.com/class/the-midnight-quilt-show-season-5/',
         'info_dict': {
@@ -45,7 +45,7 @@ class CybraryBaseIE(InfoExtractor):


 class CybraryIE(CybraryBaseIE):
-    _VALID_URL = r'https?://app.cybrary.it/immersive/(?P<enrollment>[0-9]+)/activity/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://app\.cybrary\.it/immersive/(?P<enrollment>[0-9]+)/activity/(?P<id>[0-9]+)'
     _TESTS = [{
         'url': 'https://app.cybrary.it/immersive/12487950/activity/63102',
         'md5': '9ae12d37e555cb2ed554223a71a701d0',
@@ -105,12 +105,12 @@ class CybraryIE(CybraryBaseIE):
             'chapter': module.get('title'),
             'chapter_id': str_or_none(module.get('id')),
             'title': activity.get('title'),
-            'url': smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'http_headers': {'Referer': 'https://api.cybrary.it'}})
+            'url': smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'referer': 'https://api.cybrary.it'})
         }


 class CybraryCourseIE(CybraryBaseIE):
-    _VALID_URL = r'https://app.cybrary.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])'
+    _VALID_URL = r'https://app\.cybrary\.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])'
     _TESTS = [{
         'url': 'https://app.cybrary.it/browse/course/az-500-microsoft-azure-security-technologies',
         'info_dict': {
@@ -138,7 +138,7 @@ class DubokuIE(InfoExtractor):
         # of the video.
         return {
             '_type': 'url_transparent',
-            'url': smuggle_url(data_url, {'http_headers': headers}),
+            'url': smuggle_url(data_url, {'referer': webpage_url}),
             'id': video_id,
             'title': title,
             'series': series_title,
@@ -106,4 +106,4 @@ class EmbedlyIE(InfoExtractor):
             return self.url_result(src, YoutubeTabIE)
         return self.url_result(smuggle_url(
             urllib.parse.unquote(traverse_obj(qs, ('src', 0), ('url', 0))),
-            {'http_headers': {'Referer': url}}))
+            {'referer': url}))
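`smuggle_url`/`unsmuggle_url` are real `yt_dlp.utils` helpers that tunnel extra data through a URL; these commits standardize the smuggled key to a top-level `'referer'`:

    from yt_dlp.utils import smuggle_url, unsmuggle_url

    url = smuggle_url('https://example.com/video', {'referer': 'https://example.com/'})
    real_url, data = unsmuggle_url(url)
    assert real_url == 'https://example.com/video'
    assert data == {'referer': 'https://example.com/'}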
@@ -8,7 +8,7 @@ from ..utils import (


 class FifaIE(InfoExtractor):
-    _VALID_URL = r'https?://www.fifa.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)'
+    _VALID_URL = r'https?://www\.fifa\.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)'
     _TESTS = [{
         'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y',
         'info_dict': {
@@ -3,7 +3,7 @@ from ..utils import int_or_none


 class FilmmoduIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www.)?filmmodu.org/(?P<id>[^/]+-(?:turkce-dublaj-izle|altyazili-izle))'
+    _VALID_URL = r'https?://(?:www\.)?filmmodu\.org/(?P<id>[^/]+-(?:turkce-dublaj-izle|altyazili-izle))'
     _TESTS = [{
         'url': 'https://www.filmmodu.org/f9-altyazili-izle',
         'md5': 'aeefd955c2a508a5bdaa3bcec8eeb0d4',
@@ -17,6 +17,7 @@ from ..utils import (
     determine_protocol,
     dict_get,
     extract_basic_auth,
+    filter_dict,
     format_field,
     int_or_none,
     is_html,
@@ -34,6 +35,7 @@ from ..utils import (
     unified_timestamp,
     unsmuggle_url,
     update_url_query,
+    urlhandle_detect_ext,
     url_or_none,
     urljoin,
     variadic,
@@ -2434,10 +2436,10 @@ class GenericIE(InfoExtractor):
         # to accept raw bytes and being able to download only a chunk.
         # It may probably better to solve this by checking Content-Type for application/octet-stream
         # after a HEAD request, but not sure if we can rely on this.
-        full_response = self._request_webpage(url, video_id, headers={
+        full_response = self._request_webpage(url, video_id, headers=filter_dict({
             'Accept-Encoding': 'identity',
-            **smuggled_data.get('http_headers', {})
-        })
+            'Referer': smuggled_data.get('referer'),
+        }))
         new_url = full_response.url
         url = urllib.parse.urlparse(url)._replace(scheme=urllib.parse.urlparse(new_url).scheme).geturl()
         if new_url != extract_basic_auth(url)[0]:
@@ -2457,9 +2459,9 @@ class GenericIE(InfoExtractor):
         m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
         if m:
             self.report_detected('direct video link')
-            headers = smuggled_data.get('http_headers', {})
+            headers = filter_dict({'Referer': smuggled_data.get('referer')})
             format_id = str(m.group('format_id'))
-            ext = determine_ext(url)
+            ext = determine_ext(url, default_ext=None) or urlhandle_detect_ext(full_response)
             subtitles = {}
             if format_id.endswith('mpegurl') or ext == 'm3u8':
                 formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4', headers=headers)
@@ -2471,6 +2473,7 @@ class GenericIE(InfoExtractor):
             formats = [{
                 'format_id': format_id,
                 'url': url,
+                'ext': ext,
                 'vcodec': 'none' if m.group('type') == 'audio' else None
             }]
             info_dict['direct'] = True
@@ -2708,7 +2711,7 @@ class GenericIE(InfoExtractor):
                 'url': smuggle_url(json_ld['url'], {
                     'force_videoid': video_id,
                     'to_generic': True,
-                    'http_headers': {'Referer': url},
+                    'referer': url,
                 }),
             }, json_ld)]
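`filter_dict` (a real `yt_dlp.utils` helper) drops `None`-valued entries, so when no referer was smuggled the header is simply omitted instead of being sent as `Referer: None`:

    from yt_dlp.utils import filter_dict

    assert filter_dict({'Accept-Encoding': 'identity', 'Referer': None}) == {'Accept-Encoding': 'identity'}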
@@ -31,7 +31,7 @@ class ITProTVBaseIE(InfoExtractor):


 class ITProTVIE(ITProTVBaseIE):
-    _VALID_URL = r'https://app.itpro.tv/course/(?P<course>[\w-]+)/(?P<id>[\w-]+)'
+    _VALID_URL = r'https://app\.itpro\.tv/course/(?P<course>[\w-]+)/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://app.itpro.tv/course/guided-tour/introductionitprotv',
         'md5': 'bca4a28c2667fd1a63052e71a94bb88c',
@@ -102,7 +102,7 @@ class ITProTVIE(ITProTVBaseIE):


 class ITProTVCourseIE(ITProTVBaseIE):
-    _VALID_URL = r'https?://app.itpro.tv/course/(?P<id>[\w-]+)/?(?:$|[#?])'
+    _VALID_URL = r'https?://app\.itpro\.tv/course/(?P<id>[\w-]+)/?(?:$|[#?])'
     _TESTS = [
         {
             'url': 'https://app.itpro.tv/course/guided-tour',
@@ -10,7 +10,7 @@ from ..utils import (


 class JableIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?jable.tv/videos/(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://(?:www\.)?jable\.tv/videos/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://jable.tv/videos/pppd-812/',
         'md5': 'f1537283a9bc073c31ff86ca35d9b2a6',
@@ -64,7 +64,7 @@ class JableIE(InfoExtractor):


 class JablePlaylistIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?jable.tv/(?:categories|models|tags)/(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://(?:www\.)?jable\.tv/(?:categories|models|tags)/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://jable.tv/models/kaede-karen/',
         'info_dict': {
156
yt_dlp/extractor/jtbc.py
Normal file
156
yt_dlp/extractor/jtbc.py
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
parse_duration,
|
||||||
|
url_or_none,
|
||||||
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
|
class JTBCIE(InfoExtractor):
|
||||||
|
IE_DESC = 'jtbc.co.kr'
|
||||||
|
_VALID_URL = r'''(?x)
|
||||||
|
https?://(?:
|
||||||
|
vod\.jtbc\.co\.kr/player/(?:program|clip)
|
||||||
|
|tv\.jtbc\.co\.kr/(?:replay|trailer|clip)/pr\d+/pm\d+
|
||||||
|
)/(?P<id>(?:ep|vo)\d+)'''
|
||||||
|
_GEO_COUNTRIES = ['KR']
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://tv.jtbc.co.kr/replay/pr10011629/pm10067930/ep20216321/view',
|
||||||
|
'md5': 'e6ade71d8c8685bbfd6e6ce4167c6a6c',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'VO10721192',
|
||||||
|
'display_id': 'ep20216321',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '힘쎈여자 강남순 2회 다시보기',
|
||||||
|
'description': 'md5:043c1d9019100ce271dba09995dbd1e2',
|
||||||
|
'duration': 3770.0,
|
||||||
|
'release_date': '20231008',
|
||||||
|
'age_limit': 15,
|
||||||
|
'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/drama/stronggirlnamsoon/img/20231008_163541_522_1.jpg',
|
||||||
|
'series': '힘쎈여자 강남순',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://vod.jtbc.co.kr/player/program/ep20216733',
|
||||||
|
'md5': '217a6d190f115a75e4bda0ceaa4cd7f4',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'VO10721429',
|
||||||
|
'display_id': 'ep20216733',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '헬로 마이 닥터 친절한 진료실 149회 다시보기',
|
||||||
|
'description': 'md5:1d70788a982dd5de26874a92fcffddb8',
|
||||||
|
'duration': 2720.0,
|
||||||
|
'release_date': '20231009',
|
||||||
|
'age_limit': 15,
|
||||||
|
'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/culture/hellomydoctor/img/20231009_095002_528_1.jpg',
|
||||||
|
'series': '헬로 마이 닥터 친절한 진료실',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://vod.jtbc.co.kr/player/clip/vo10721270',
|
||||||
|
'md5': '05782e2dc22a9c548aebefe62ae4328a',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'VO10721270',
|
||||||
|
'display_id': 'vo10721270',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '뭉쳐야 찬다3 2회 예고편 - A매치로 향하는 마지막 관문💥',
|
||||||
|
'description': 'md5:d48b51a8655c84843b4ed8d0c39aae68',
|
||||||
|
'duration': 46.0,
|
||||||
|
'release_date': '20231015',
|
||||||
|
'age_limit': 15,
|
||||||
|
'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/enter/soccer3/img/20231008_210957_775_1.jpg',
|
||||||
|
'series': '뭉쳐야 찬다3',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
        'url': 'https://tv.jtbc.co.kr/trailer/pr10010392/pm10032526/vo10720912/view',
        'md5': '367d480eb3ef54a9cd7a4b4d69c4b32d',
        'info_dict': {
            'id': 'VO10720912',
            'display_id': 'vo10720912',
            'ext': 'mp4',
            'title': '아는 형님 404회 예고편 | 10월 14일(토) 저녁 8시 50분 방송!',
            'description': 'md5:2743bb1079ceb85bb00060f2ad8f0280',
            'duration': 148.0,
            'release_date': '20231014',
            'age_limit': 15,
            'thumbnail': 'https://fs.jtbc.co.kr//joydata/CP00000001/prog/enter/jtbcbros/img/20231006_230023_802_1.jpg',
            'series': '아는 형님',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        if display_id.startswith('vo'):
            video_id = display_id.upper()
        else:
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(r'data-vod="(VO\d+)"', webpage, 'vod id')

        playback_data = self._download_json(
            f'https://api.jtbc.co.kr/vod/{video_id}', video_id, note='Downloading VOD playback data')

        subtitles = {}
        for sub in traverse_obj(playback_data, ('tracks', lambda _, v: v['file'])):
            subtitles.setdefault(sub.get('label', 'und'), []).append({'url': sub['file']})

        formats = []
        for stream_url in traverse_obj(playback_data, ('sources', 'HLS', ..., 'file', {url_or_none})):
            stream_url = re.sub(r'/playlist(?:_pd\d+)?\.m3u8', '/index.m3u8', stream_url)
            formats.extend(self._extract_m3u8_formats(stream_url, video_id, fatal=False))

        metadata = self._download_json(
            'https://now-api.jtbc.co.kr/v1/vod/detail', video_id,
            note='Downloading mobile details', fatal=False, query={'vodFileId': video_id})
        return {
            'id': video_id,
            'display_id': display_id,
            **traverse_obj(metadata, ('vodDetail', {
                'title': 'vodTitleView',
                'series': 'programTitle',
                'age_limit': ('watchAge', {int_or_none}),
                'release_date': ('broadcastDate', {lambda x: re.match(r'\d{8}', x.replace('.', ''))}, 0),
                'description': 'episodeContents',
                'thumbnail': ('imgFileUrl', {url_or_none}),
            })),
            'duration': parse_duration(playback_data.get('playTime')),
            'formats': formats,
            'subtitles': subtitles,
        }


class JTBCProgramIE(InfoExtractor):
    IE_NAME = 'JTBC:program'
    _VALID_URL = r'https?://(?:vod\.jtbc\.co\.kr/program|tv\.jtbc\.co\.kr/replay)/(?P<id>pr\d+)/(?:replay|pm\d+)/?(?:$|[?#])'

    _TESTS = [{
        'url': 'https://tv.jtbc.co.kr/replay/pr10010392/pm10032710',
        'info_dict': {
            '_type': 'playlist',
            'id': 'pr10010392',
        },
        'playlist_count': 398,
    }, {
        'url': 'https://vod.jtbc.co.kr/program/pr10011491/replay',
        'info_dict': {
            '_type': 'playlist',
            'id': 'pr10011491',
        },
        'playlist_count': 59,
    }]

    def _real_extract(self, url):
        program_id = self._match_id(url)

        vod_list = self._download_json(
            'https://now-api.jtbc.co.kr/v1/vodClip/programHome/programReplayVodList', program_id,
            note='Downloading program replay list', query={
                'programId': program_id,
                'rowCount': '10000',
            })

        entries = [self.url_result(f'https://vod.jtbc.co.kr/player/program/{video_id}', JTBCIE, video_id)
                   for video_id in traverse_obj(vod_list, ('programReplayVodList', ..., 'episodeId'))]
        return self.playlist_result(entries, program_id)
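
A minimal, self-contained sketch (not part of the diff) of the `traverse_obj` branching that the JTBC extractor above relies on for all its JSON handling. It assumes only that yt-dlp is installed; the sample dict is hypothetical:

import re
from yt_dlp.utils import traverse_obj

playback_data = {'sources': {'HLS': [{'file': 'https://example.com/a.m3u8'},
                                     {'file': 'https://example.com/b.m3u8'}]}}
# `...` branches over every list element, collecting each match into a list
assert traverse_obj(playback_data, ('sources', 'HLS', ..., 'file')) == [
    'https://example.com/a.m3u8', 'https://example.com/b.m3u8']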
yt_dlp/extractor/kommunetv.py
@@ -3,7 +3,7 @@ from ..utils import update_url
 
 
 class KommunetvIE(InfoExtractor):
-    _VALID_URL = r'https://(\w+).kommunetv.no/archive/(?P<id>\w+)'
+    _VALID_URL = r'https://\w+\.kommunetv\.no/archive/(?P<id>\w+)'
     _TEST = {
         'url': 'https://oslo.kommunetv.no/archive/921',
         'md5': '5f102be308ee759be1e12b63d5da4bbc',
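
Many hunks in this range (kommunetv above, and mainstreaming, mediaite, mochavideo, nfl, nubiles-porn and of.tv below) make the same one-character fix: escaping `.` inside `_VALID_URL`. A quick illustration of why it matters, using plain `re` and a hypothetical lookalike host:

import re

# an unescaped dot matches any character, so lookalike hosts slip through
assert re.search(r'kommunetv.no/archive/', 'https://oslo.kommunetvXno/archive/921')
assert not re.search(r'kommunetv\.no/archive/', 'https://oslo.kommunetvXno/archive/921')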
yt_dlp/extractor/la7.py
@@ -208,9 +208,9 @@ class LA7PodcastIE(LA7PodcastEpisodeIE):  # XXX: Do not subclass from concrete IE
         'url': 'https://www.la7.it/propagandalive/podcast',
         'info_dict': {
             'id': 'propagandalive',
-            'title': "Propaganda Live",
+            'title': 'Propaganda Live',
         },
-        'playlist_count_min': 10,
+        'playlist_mincount': 10,
     }]
 
     def _real_extract(self, url):
73  yt_dlp/extractor/laxarxames.py  (new file)
@@ -0,0 +1,73 @@
import json

from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..utils import ExtractorError
from ..utils.traversal import traverse_obj


class LaXarxaMesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?laxarxames\.cat/(?:[^/?#]+/)*?(player|movie-details)/(?P<id>\d+)'
    _NETRC_MACHINE = 'laxarxames'
    _TOKEN = None
    _TESTS = [{
        'url': 'https://www.laxarxames.cat/player/3459421',
        'md5': '0966f46c34275934c19af78f3df6e2bc',
        'info_dict': {
            'id': '6339612436112',
            'ext': 'mp4',
            'title': 'Resum | UA Horta — UD Viladecans',
            'timestamp': 1697905186,
            'thumbnail': r're:https?://.*\.jpg',
            'description': '',
            'upload_date': '20231021',
            'duration': 129.44,
            'tags': ['ott', 'esports', '23-24', ' futbol', ' futbol-partits', 'elit', 'resum'],
            'uploader_id': '5779379807001',
        },
        'skip': 'Requires login',
    }]

    def _perform_login(self, username, password):
        if self._TOKEN:
            return

        login = self._download_json(
            'https://api.laxarxames.cat/Authorization/SignIn', None, note='Logging in', headers={
                'X-Tenantorigin': 'https://laxarxames.cat',
                'Content-Type': 'application/json',
            }, data=json.dumps({
                'Username': username,
                'Password': password,
                'Device': {
                    'PlatformCode': 'WEB',
                    'Name': 'Mac OS ()',
                },
            }).encode(), expected_status=401)

        self._TOKEN = traverse_obj(login, ('AuthorizationToken', 'Token', {str}))
        if not self._TOKEN:
            raise ExtractorError('Login failed', expected=True)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        if not self._TOKEN:
            self.raise_login_required()

        media_play_info = self._download_json(
            'https://api.laxarxames.cat/Media/GetMediaPlayInfo', video_id,
            data=json.dumps({
                'MediaId': int(video_id),
                'StreamType': 'MAIN'
            }).encode(), headers={
                'Authorization': f'Bearer {self._TOKEN}',
                'X-Tenantorigin': 'https://laxarxames.cat',
                'Content-Type': 'application/json',
            })

        if not traverse_obj(media_play_info, ('ContentUrl', {str})):
            self.raise_no_formats('No video found', expected=True)

        return self.url_result(
            f'https://players.brightcove.net/5779379807001/default_default/index.html?videoId={media_play_info["ContentUrl"]}',
            BrightcoveNewIE, video_id, media_play_info.get('Title'))
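
A usage note, not part of the diff: since `LaXarxaMesIE` declares `_NETRC_MACHINE = 'laxarxames'`, credentials can be supplied either on the command line with `--username`/`--password` or via the usual yt-dlp `.netrc` entry (placeholder values below):

machine laxarxames login YOUR_USERNAME password YOUR_PASSWORD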
yt_dlp/extractor/mainstreaming.py
@@ -13,7 +13,7 @@ from ..utils import (
 
 
 class MainStreamingIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:webtools-?)?(?P<host>[A-Za-z0-9-]*\.msvdn.net)/(?:embed|amp_embed|content)/(?P<id>\w+)'
+    _VALID_URL = r'https?://(?:webtools-?)?(?P<host>[A-Za-z0-9-]*\.msvdn\.net)/(?:embed|amp_embed|content)/(?P<id>\w+)'
     _EMBED_REGEX = [rf'<iframe[^>]+?src=["\']?(?P<url>{_VALID_URL})["\']?']
     IE_DESC = 'MainStreaming Player'
 
89  yt_dlp/extractor/mbn.py  (new file)
@@ -0,0 +1,89 @@
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class MBNIE(InfoExtractor):
    IE_DESC = 'mbn.co.kr (매일방송)'
    _VALID_URL = r'https?://(?:www\.)?mbn\.co\.kr/vod/programContents/preview(?:list)?/\d+/\d+/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://mbn.co.kr/vod/programContents/previewlist/861/5433/1276155',
        'md5': '85e1694e5b247c04d1386b7e3c90fd76',
        'info_dict': {
            'id': '1276155',
            'ext': 'mp4',
            'title': '결국 사로잡힌 권유리, 그녀를 목숨 걸고 구하려는 정일우!',
            'duration': 3891,
            'release_date': '20210703',
            'thumbnail': 'http://img.vod.mbn.co.kr/mbnvod2img/861/2021/07/03/20210703230811_20_861_1276155_360_7_0.jpg',
            'series': '보쌈 - 운명을 훔치다',
            'episode': 'Episode 19',
            'episode_number': 19,
        },
    }, {
        'url': 'https://www.mbn.co.kr/vod/programContents/previewlist/835/5294/1084744',
        'md5': 'fc65d3aac85e85e0b5056f4ef99cde4a',
        'info_dict': {
            'id': '1084744',
            'ext': 'mp4',
            'title': '김정은♥최원영, 제자리를 찾은 위험한 부부! "결혼은 투쟁이면서, 어려운 방식이야.."',
            'duration': 93,
            'release_date': '20201124',
            'thumbnail': 'http://img.vod.mbn.co.kr/mbnvod2img/835/2020/11/25/20201125000221_21_835_1084744_360_7_0.jpg',
            'series': '나의 위험한 아내',
        },
    }, {
        'url': 'https://www.mbn.co.kr/vod/programContents/preview/952/6088/1054797?next=1',
        'md5': 'c711103c72aeac8323a5cf1751f10097',
        'info_dict': {
            'id': '1054797',
            'ext': 'mp4',
            'title': '[2차 티저] MBN 주말 미니시리즈 <완벽한 결혼의 정석> l 그녀에게 주어진 두 번째 인생',
            'duration': 65,
            'release_date': '20231028',
            'thumbnail': 'http://img.vod.mbn.co.kr/vod2/952/2023/09/11/20230911130223_22_952_1054797_1080_7.jpg',
            'series': '완벽한 결혼의 정석',
        },
    }]

    def _real_extract(self, url):
        content_id = self._match_id(url)
        webpage = self._download_webpage(url, content_id)

        content_cls_cd = self._search_regex(
            r'"\?content_cls_cd=(\d+)&', webpage, 'content cls cd', fatal=False) or '20'
        media_info = self._download_json(
            'https://www.mbn.co.kr/player/mbnVodPlayer_2020.mbn', content_id,
            note='Fetching playback data', query={
                'content_cls_cd': content_cls_cd,
                'content_id': content_id,
                'relay_type': '1',
            })

        formats = []
        for stream_url in traverse_obj(media_info, ('movie_list', ..., 'url', {url_or_none})):
            stream_url = re.sub(r'/(?:chunk|play)list(?:_pd\d+)?\.m3u8', '/manifest.m3u8', stream_url)
            final_url = url_or_none(self._download_webpage(
                f'https://www.mbn.co.kr/player/mbnStreamAuth_new_vod.mbn?vod_url={stream_url}',
                content_id, note='Fetching authenticated m3u8 url'))

            formats.extend(self._extract_m3u8_formats(final_url, content_id, fatal=False))

        return {
            'id': content_id,
            **traverse_obj(media_info, {
                'title': ('movie_title', {str}),
                'duration': ('play_sec', {int_or_none}),
                'release_date': ('bcast_date', {lambda x: x.replace('.', '')}, {unified_strdate}),
                'thumbnail': ('movie_start_Img', {url_or_none}),
                'series': ('prog_nm', {str}),
                'episode_number': ('ad_contentnumber', {int_or_none}),
            }),
            'formats': formats,
        }
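
A standalone sketch (not part of the diff) of the manifest rewrite `MBNIE` performs above; the input URL is hypothetical. Rewriting a per-bitrate chunklist URL to the master manifest lets `_extract_m3u8_formats` discover every quality variant rather than just one:

import re

stream_url = 'https://example.com/vod/chunklist_pd1000.m3u8'  # hypothetical URL
master = re.sub(r'/(?:chunk|play)list(?:_pd\d+)?\.m3u8', '/manifest.m3u8', stream_url)
assert master == 'https://example.com/vod/manifest.m3u8'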
yt_dlp/extractor/mediaite.py
@@ -2,7 +2,7 @@ from .common import InfoExtractor
 
 
 class MediaiteIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?mediaite.com(?!/category)(?:/[\w-]+){2}'
+    _VALID_URL = r'https?://(?:www\.)?mediaite\.com(?!/category)(?:/[\w-]+){2}'
    _TESTS = [{
        'url': 'https://www.mediaite.com/sports/bill-burr-roasts-nfl-for-promoting-black-lives-matter-while-scheduling-more-games-after-all-the-sht-they-know-about-cte/',
        'info_dict': {
yt_dlp/extractor/mochavideo.py
@@ -3,7 +3,7 @@ from ..utils import int_or_none, traverse_obj
 
 
 class MochaVideoIE(InfoExtractor):
-    _VALID_URL = r'https?://video.mocha.com.vn/(?P<video_slug>[\w-]+)'
+    _VALID_URL = r'https?://video\.mocha\.com\.vn/(?P<video_slug>[\w-]+)'
    _TESTS = [{
        'url': 'http://video.mocha.com.vn/chuyen-meo-gia-su-tu-thong-diep-cuoc-song-v18694039',
        'info_dict': {
yt_dlp/extractor/mtv.py
@@ -1,4 +1,5 @@
 import re
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..compat import compat_str
@@ -137,7 +138,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         mediagen_doc = self._download_xml(
             mediagen_url, video_id, 'Downloading video urls', fatal=False)
 
-        if mediagen_doc is False:
+        if not isinstance(mediagen_doc, xml.etree.ElementTree.Element):
             return None
 
         item = mediagen_doc.find('./video/item')
yt_dlp/extractor/nbc.py
@@ -1,6 +1,7 @@
 import base64
 import json
 import re
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from .theplatform import ThePlatformIE, default_ns
@@ -803,8 +804,10 @@ class NBCStationsIE(InfoExtractor):
         smil = self._download_xml(
             f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
             note='Downloading SMIL data', query=query, fatal=is_live)
-        subtitles = self._parse_smil_subtitles(smil, default_ns) if smil else {}
-        for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil else []:
+        if not isinstance(smil, xml.etree.ElementTree.Element):
+            smil = None
+        subtitles = self._parse_smil_subtitles(smil, default_ns) if smil is not None else {}
+        for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil is not None else []:
             info['duration'] = float_or_none(remove_end(video.get('dur'), 'ms'), 1000)
             video_src_url = video.get('src')
             ext = mimetype2ext(video.get('type'), default=determine_ext(video_src_url))
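
The two `isinstance` hunks above (mtv and nbc) guard against a standard-library quirk rather than a yt-dlp one: an `xml.etree.ElementTree.Element` with no children evaluates as falsy, so truthiness tests like `if smil:` can wrongly discard a successfully parsed document. A short demonstration:

import xml.etree.ElementTree as ET

empty = ET.fromstring('<video/>')
assert isinstance(empty, ET.Element)
if not empty:  # falsy despite being a real, parsed element (DeprecationWarning on 3.12+)
    print('a childless Element is falsy')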
yt_dlp/extractor/neteasemusic.py
@@ -142,6 +142,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'subtitles': {'lyrics': [{'ext': 'lrc'}]},
             "duration": 256,
             'thumbnail': r're:^http.*\.jpg',
+            'album': '偶像练习生 表演曲目合集',
+            'average_rating': int,
+            'album_artist': '偶像练习生',
         },
     }, {
         'note': 'No lyrics.',
@@ -155,6 +158,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'timestamp': 1202745600,
             'duration': 263,
             'thumbnail': r're:^http.*\.jpg',
+            'album': 'Piano Solos Vol. 2',
+            'album_artist': 'Dustin O\'Halloran',
+            'average_rating': int,
         },
     }, {
         'url': 'https://y.music.163.com/m/song?app_version=8.8.45&id=95670&uct2=sKnvS4+0YStsWkqsPhFijw%3D%3D&dlt=0846',
@@ -171,6 +177,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'duration': 268,
             'alt_title': '伴唱:现代人乐队 合唱:总政歌舞团',
             'thumbnail': r're:^http.*\.jpg',
+            'average_rating': int,
+            'album': '红色摇滚',
+            'album_artist': '侯牧人',
         },
     }, {
         'url': 'http://music.163.com/#/song?id=32102397',
@@ -186,6 +195,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'subtitles': {'lyrics': [{'ext': 'lrc'}]},
             'duration': 199,
             'thumbnail': r're:^http.*\.jpg',
+            'album': 'Bad Blood',
+            'average_rating': int,
+            'album_artist': 'Taylor Swift',
         },
         'skip': 'Blocked outside Mainland China',
     }, {
@@ -203,6 +215,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'duration': 229,
             'alt_title': '说出愿望吧(Genie)',
             'thumbnail': r're:^http.*\.jpg',
+            'average_rating': int,
+            'album': 'Oh!',
+            'album_artist': '少女时代',
         },
         'skip': 'Blocked outside Mainland China',
     }]
@@ -253,12 +268,15 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'formats': formats,
             'alt_title': '/'.join(traverse_obj(info, (('transNames', 'alias'), ...))) or None,
             'creator': ' / '.join(traverse_obj(info, ('artists', ..., 'name'))) or None,
+            'album_artist': ' / '.join(traverse_obj(info, ('album', 'artists', ..., 'name'))) or None,
             **lyric_data,
             **traverse_obj(info, {
                 'title': ('name', {str}),
                 'timestamp': ('album', 'publishTime', {self.kilo_or_none}),
                 'thumbnail': ('album', 'picUrl', {url_or_none}),
                 'duration': ('duration', {self.kilo_or_none}),
+                'album': ('album', 'name', {str}),
+                'average_rating': ('score', {int_or_none}),
            }),
        }
 
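
The `{self.kilo_or_none}` transforms above convert NetEase's millisecond values to seconds. A sketch of what such a helper amounts to, assuming it behaves like `int_or_none(value, scale=1000)` (the real definition lives on the base extractor):

from yt_dlp.utils import int_or_none

def kilo_or_none(value):
    return int_or_none(value, scale=1000)

assert kilo_or_none(256000) == 256
assert kilo_or_none(None) is None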
yt_dlp/extractor/nfl.py
@@ -247,7 +247,7 @@ class NFLArticleIE(NFLBaseIE):
 
 
 class NFLPlusReplayIE(NFLBaseIE):
     IE_NAME = 'nfl.com:plus:replay'
-    _VALID_URL = r'https?://(?:www\.)?nfl.com/plus/games/(?P<slug>[\w-]+)(?:/(?P<id>\d+))?'
+    _VALID_URL = r'https?://(?:www\.)?nfl\.com/plus/games/(?P<slug>[\w-]+)(?:/(?P<id>\d+))?'
     _TESTS = [{
         'url': 'https://www.nfl.com/plus/games/giants-at-vikings-2022-post-1/1572108',
         'info_dict': {
@@ -342,7 +342,7 @@ class NFLPlusReplayIE(NFLBaseIE):
 
 
 class NFLPlusEpisodeIE(NFLBaseIE):
     IE_NAME = 'nfl.com:plus:episode'
-    _VALID_URL = r'https?://(?:www\.)?nfl.com/plus/episodes/(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://(?:www\.)?nfl\.com/plus/episodes/(?P<id>[\w-]+)'
     _TESTS = [{
         'note': 'Subscription required',
         'url': 'https://www.nfl.com/plus/episodes/kurt-s-qb-insider-conference-championships',
yt_dlp/extractor/nhk.py
@@ -3,6 +3,8 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    clean_html,
+    get_element_by_class,
     int_or_none,
     join_nonempty,
     parse_duration,
@@ -45,42 +47,54 @@ class NhkBaseIE(InfoExtractor):
         self.cache.store('nhk', 'api_info', api_info)
         return api_info
 
-    def _extract_formats_and_subtitles(self, vod_id):
+    def _extract_stream_info(self, vod_id):
         for refresh in (False, True):
             api_info = self._get_api_info(refresh)
             if not api_info:
                 continue
 
             api_url = api_info.pop('url')
-            stream_url = traverse_obj(
+            meta = traverse_obj(
                 self._download_json(
                     api_url, vod_id, 'Downloading stream url info', fatal=False, query={
                         **api_info,
                         'type': 'json',
                         'optional_id': vod_id,
                         'active_flg': 1,
-                    }),
-                ('meta', 0, 'movie_url', ('mb_auto', 'auto_sp', 'auto_pc'), {url_or_none}), get_all=False)
-            if stream_url:
-                return self._extract_m3u8_formats_and_subtitles(stream_url, vod_id)
+                    }), ('meta', 0))
+            stream_url = traverse_obj(
+                meta, ('movie_url', ('mb_auto', 'auto_sp', 'auto_pc'), {url_or_none}), get_all=False)
+
+            if stream_url:
+                formats, subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, vod_id)
+                return {
+                    **traverse_obj(meta, {
+                        'duration': ('duration', {int_or_none}),
+                        'timestamp': ('publication_date', {unified_timestamp}),
+                        'release_timestamp': ('insert_date', {unified_timestamp}),
+                        'modified_timestamp': ('update_date', {unified_timestamp}),
+                    }),
+                    'formats': formats,
+                    'subtitles': subtitles,
+                }
         raise ExtractorError('Unable to extract stream url')
 
     def _extract_episode_info(self, url, episode=None):
         fetch_episode = episode is None
-        lang, m_type, episode_id = NhkVodIE._match_valid_url(url).groups()
-        if len(episode_id) == 7:
+        lang, m_type, episode_id = NhkVodIE._match_valid_url(url).group('lang', 'type', 'id')
+        is_video = m_type == 'video'
+
+        if is_video:
             episode_id = episode_id[:4] + '-' + episode_id[4:]
 
-        is_video = m_type == 'video'
         if fetch_episode:
             episode = self._call_api(
                 episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]
-        title = episode.get('sub_title_clean') or episode['sub_title']
 
         def get_clean_field(key):
-            return episode.get(key + '_clean') or episode.get(key)
+            return clean_html(episode.get(key + '_clean') or episode.get(key))
 
+        title = get_clean_field('sub_title')
         series = get_clean_field('title')
 
         thumbnails = []
@@ -95,22 +109,30 @@ class NhkBaseIE(InfoExtractor):
                 'url': 'https://www3.nhk.or.jp' + img_path,
             })
 
+        episode_name = title
+        if series and title:
+            title = f'{series} - {title}'
+        elif series and not title:
+            title = series
+            series = None
+            episode_name = None
+        else:  # title, no series
+            episode_name = None
+
         info = {
             'id': episode_id + '-' + lang,
-            'title': '%s - %s' % (series, title) if series and title else title,
+            'title': title,
             'description': get_clean_field('description'),
             'thumbnails': thumbnails,
             'series': series,
-            'episode': title,
+            'episode': episode_name,
         }
 
         if is_video:
             vod_id = episode['vod_id']
-            formats, subs = self._extract_formats_and_subtitles(vod_id)
 
             info.update({
+                **self._extract_stream_info(vod_id),
                 'id': vod_id,
-                'formats': formats,
-                'subtitles': subs,
             })
 
         else:
@@ -133,47 +155,61 @@ class NhkBaseIE(InfoExtractor):
 
 class NhkVodIE(NhkBaseIE):
     # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg
-    _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
+    _VALID_URL = [rf'{NhkBaseIE._BASE_URL_REGEX}/(?P<type>video)/(?P<id>[0-9a-z]+)',
+                  rf'{NhkBaseIE._BASE_URL_REGEX}/(?P<type>audio)/(?P<id>[^/?#]+?-\d{{8}}-[0-9a-z]+)']
     # Content available only for a limited period of time. Visit
     # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
     _TESTS = [{
-        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2061601/',
+        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2049126/',
         'info_dict': {
-            'id': 'yd8322ch',
+            'id': 'nw_vod_v_en_2049_126_20230413233000_01_1681398302',
             'ext': 'mp4',
-            'description': 'md5:109c8b05d67a62d0592f2b445d2cd898',
-            'title': 'GRAND SUMO Highlights - [Recap] May Tournament Day 1 (Opening Day)',
-            'upload_date': '20230514',
-            'timestamp': 1684083791,
-            'series': 'GRAND SUMO Highlights',
-            'episode': '[Recap] May Tournament Day 1 (Opening Day)',
-            'thumbnail': 'https://mz-edge.stream.co.jp/thumbs/aid/t1684084443/4028649.jpg?w=1920&h=1080',
+            'title': 'Japan Railway Journal - The Tohoku Shinkansen: Full Speed Ahead',
+            'description': 'md5:49f7c5b206e03868a2fdf0d0814b92f6',
+            'thumbnail': 'md5:51bcef4a21936e7fea1ff4e06353f463',
+            'episode': 'The Tohoku Shinkansen: Full Speed Ahead',
+            'series': 'Japan Railway Journal',
+            'modified_timestamp': 1694243656,
+            'timestamp': 1681428600,
+            'release_timestamp': 1693883728,
+            'duration': 1679,
+            'upload_date': '20230413',
+            'modified_date': '20230909',
+            'release_date': '20230905',
        },
    }, {
        # video clip
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',
-        'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',
+        'md5': '153c3016dfd252ba09726588149cf0e7',
        'info_dict': {
-            'id': 'a95j5iza',
+            'id': 'lpZXIwaDE6_Z-976CPsFdxyICyWUzlT5',
            'ext': 'mp4',
-            'title': "Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU",
+            'title': 'Dining with the Chef - Chef Saito\'s Family recipe: MENCHI-KATSU',
            'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',
-            'timestamp': 1565965194,
-            'upload_date': '20190816',
-            'thumbnail': 'https://mz-edge.stream.co.jp/thumbs/aid/t1567086278/3715195.jpg?w=1920&h=1080',
+            'thumbnail': 'md5:d6a4d9b6e9be90aaadda0bcce89631ed',
            'series': 'Dining with the Chef',
            'episode': 'Chef Saito\'s Family recipe: MENCHI-KATSU',
+            'duration': 148,
+            'upload_date': '20190816',
+            'release_date': '20230902',
+            'release_timestamp': 1693619292,
+            'modified_timestamp': 1694168033,
+            'modified_date': '20230908',
+            'timestamp': 1565997540,
        },
    }, {
-        # audio clip
-        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',
+        # radio
+        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/livinginjapan-20231001-1/',
        'info_dict': {
-            'id': 'r_inventions-20201104-1-en',
+            'id': 'livinginjapan-20231001-1-en',
            'ext': 'm4a',
-            'title': "Japan's Top Inventions - Miniature Video Cameras",
-            'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',
+            'title': 'Living in Japan - Tips for Travelers to Japan / Ramen Vending Machines',
+            'series': 'Living in Japan',
+            'description': 'md5:0a0e2077d8f07a03071e990a6f51bfab',
+            'thumbnail': 'md5:960622fb6e06054a4a1a0c97ea752545',
+            'episode': 'Tips for Travelers to Japan / Ramen Vending Machines'
        },
-        'skip': '404 Not Found',
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',
        'only_matching': True,
@@ -199,6 +235,36 @@ class NhkVodIE(NhkBaseIE):
             'timestamp': 1623722008,
         },
         'skip': '404 Not Found',
+    }, {
+        # japanese-language, longer id than english
+        'url': 'https://www3.nhk.or.jp/nhkworld/ja/ondemand/video/0020271111/',
+        'info_dict': {
+            'id': 'nw_ja_v_jvod_ohayou_20231008',
+            'ext': 'mp4',
+            'title': 'おはよう日本(7時台) - 10月8日放送',
+            'series': 'おはよう日本(7時台)',
+            'episode': '10月8日放送',
+            'thumbnail': 'md5:d733b1c8e965ab68fb02b2d347d0e9b4',
+            'description': 'md5:9c1d6cbeadb827b955b20e99ab920ff0',
+        },
+        'skip': 'expires 2023-10-15',
+    }, {
+        # a one-off (single-episode series). title from the api is just '<p></p>'
+        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/3004952/',
+        'info_dict': {
+            'id': 'nw_vod_v_en_3004_952_20230723091000_01_1690074552',
+            'ext': 'mp4',
+            'title': 'Barakan Discovers AMAMI OSHIMA: Isson\'s Treasure Island',
+            'description': 'md5:5db620c46a0698451cc59add8816b797',
+            'thumbnail': 'md5:67d9ff28009ba379bfa85ad1aaa0e2bd',
+            'release_date': '20230905',
+            'timestamp': 1690103400,
+            'duration': 2939,
+            'release_timestamp': 1693898699,
+            'modified_timestamp': 1698057495,
+            'modified_date': '20231023',
+            'upload_date': '20230723',
+        },
    }]
 
    def _real_extract(self, url):
@@ -206,20 +272,22 @@ class NhkVodIE(NhkBaseIE):
 
 
 class NhkVodProgramIE(NhkBaseIE):
-    _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
+    _VALID_URL = rf'{NhkBaseIE._BASE_URL_REGEX}/program{NhkBaseIE._TYPE_REGEX}(?P<id>\w+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?'
     _TESTS = [{
         # video program episodes
         'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/sumo',
         'info_dict': {
             'id': 'sumo',
             'title': 'GRAND SUMO Highlights',
+            'description': 'md5:fc20d02dc6ce85e4b72e0273aa52fdbf',
         },
-        'playlist_mincount': 12,
+        'playlist_mincount': 0,
     }, {
         'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',
         'info_dict': {
             'id': 'japanrailway',
             'title': 'Japan Railway Journal',
+            'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f',
        },
        'playlist_mincount': 12,
    }, {
@@ -228,6 +296,7 @@ class NhkVodProgramIE(NhkBaseIE):
         'info_dict': {
             'id': 'japanrailway',
             'title': 'Japan Railway Journal',
+            'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f',
         },
         'playlist_mincount': 5,
     }, {
@@ -240,8 +309,7 @@ class NhkVodProgramIE(NhkBaseIE):
     }]
 
     def _real_extract(self, url):
-        lang, m_type, program_id, episode_type = self._match_valid_url(url).groups()
+        lang, m_type, program_id, episode_type = self._match_valid_url(url).group('lang', 'type', 'id', 'episode_type')
 
         episodes = self._call_api(
             program_id, lang, m_type == 'video', False, episode_type == 'clip')
 
@@ -253,11 +321,11 @@ class NhkVodProgramIE(NhkBaseIE):
             entries.append(self._extract_episode_info(
                 urljoin(url, episode_path), episode))
 
-        program_title = None
-        if entries:
-            program_title = entries[0].get('series')
+        html = self._download_webpage(url, program_id)
+        program_title = clean_html(get_element_by_class('p-programDetail__title', html))
+        program_description = clean_html(get_element_by_class('p-programDetail__text', html))
 
-        return self.playlist_result(entries, program_id, program_title)
+        return self.playlist_result(entries, program_id, program_title, program_description)
 
 
 class NhkForSchoolBangumiIE(InfoExtractor):
@@ -409,6 +477,7 @@ class NhkRadiruIE(InfoExtractor):
         'skip': 'Episode expired on 2023-04-16',
         'info_dict': {
             'channel': 'NHK-FM',
+            'uploader': 'NHK-FM',
             'description': 'md5:94b08bdeadde81a97df4ec882acce3e9',
             'ext': 'm4a',
             'id': '0449_01_3853544',
@@ -429,6 +498,7 @@ class NhkRadiruIE(InfoExtractor):
             'title': 'ベストオブクラシック',
             'description': '世界中の上質な演奏会をじっくり堪能する本格派クラシック番組。',
             'channel': 'NHK-FM',
+            'uploader': 'NHK-FM',
             'thumbnail': 'https://www.nhk.or.jp/prog/img/458/g458.jpg',
         },
         'playlist_mincount': 3,
@@ -442,6 +512,7 @@ class NhkRadiruIE(InfoExtractor):
             'title': '有島武郎「一房のぶどう」',
             'description': '朗読:川野一宇(ラジオ深夜便アンカー)\r\n\r\n(2016年12月8日放送「ラジオ深夜便『アンカー朗読シリーズ』」より)',
             'channel': 'NHKラジオ第1、NHK-FM',
+            'uploader': 'NHKラジオ第1、NHK-FM',
             'timestamp': 1635757200,
             'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F300/img/corner/box_109_thumbnail.jpg',
             'release_date': '20161207',
@@ -457,6 +528,7 @@ class NhkRadiruIE(InfoExtractor):
             'id': 'F261_01_3855109',
             'ext': 'm4a',
             'channel': 'NHKラジオ第1',
+            'uploader': 'NHKラジオ第1',
             'timestamp': 1681635900,
             'release_date': '20230416',
             'series': 'NHKラジオニュース',
@@ -501,6 +573,7 @@ class NhkRadiruIE(InfoExtractor):
         series_meta = traverse_obj(meta, {
             'title': 'program_name',
             'channel': 'media_name',
+            'uploader': 'media_name',
             'thumbnail': (('thumbnail_c', 'thumbnail_p'), {url_or_none}),
         }, get_all=False)
 
@@ -529,6 +602,7 @@ class NhkRadioNewsPageIE(InfoExtractor):
             'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F261/img/RADIONEWS_640.jpg',
             'description': 'md5:bf2c5b397e44bc7eb26de98d8f15d79d',
             'channel': 'NHKラジオ第1',
+            'uploader': 'NHKラジオ第1',
             'title': 'NHKラジオニュース',
         }
     }]
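
The series/episode naming added to `_extract_episode_info` above is easiest to read in isolation. A sketch mirroring the diff's logic, with hypothetical inputs:

def build_title(series, title):
    episode_name = title
    if series and title:
        title = f'{series} - {title}'
    elif series and not title:
        title, series, episode_name = series, None, None
    else:  # title present but no series (or neither)
        episode_name = None
    return title, series, episode_name

assert build_title('Japan Railway Journal', 'The Tohoku Shinkansen: Full Speed Ahead') == (
    'Japan Railway Journal - The Tohoku Shinkansen: Full Speed Ahead',
    'Japan Railway Journal', 'The Tohoku Shinkansen: Full Speed Ahead')
assert build_title('GRAND SUMO Highlights', None) == ('GRAND SUMO Highlights', None, None)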
yt_dlp/extractor/nova.py
@@ -13,7 +13,7 @@ from ..utils import (
 
 
 class NovaEmbedIE(InfoExtractor):
-    _VALID_URL = r'https?://media\.cms\.nova\.cz/embed/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://media(?:tn)?\.cms\.nova\.cz/embed/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://media.cms.nova.cz/embed/8o0n0r?autoplay=1',
         'info_dict': {
@@ -37,6 +37,16 @@ class NovaEmbedIE(InfoExtractor):
             'duration': 114,
         },
         'params': {'skip_download': 'm3u8'},
+    }, {
+        'url': 'https://mediatn.cms.nova.cz/embed/EU5ELEsmOHt?autoplay=1',
+        'info_dict': {
+            'id': 'EU5ELEsmOHt',
+            'ext': 'mp4',
+            'title': 'Haptické křeslo, bionická ruka nebo roboti. Reportérka se podívala na Týden inovací',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 1780,
+        },
+        'params': {'skip_download': 'm3u8'},
     }]
 
     def _real_extract(self, url):
yt_dlp/extractor/novaplay.py
@@ -3,7 +3,7 @@ from ..utils import int_or_none, parse_duration, parse_iso8601
 
 
 class NovaPlayIE(InfoExtractor):
-    _VALID_URL = r'https://play.nova\.bg/video/.*/(?P<id>\d+)'
+    _VALID_URL = r'https://play\.nova\.bg/video/[^?#]+/(?P<id>\d+)'
     _TESTS = [
         {
             'url': 'https://play.nova.bg/video/ochakvaite/season-0/ochakvaite-2022-07-22-sybudi-se-sat/606627',
yt_dlp/extractor/npo.py
@@ -245,7 +245,7 @@ class NPOIE(InfoExtractor):
                 'quality': 'npoplus',
                 'tokenId': player_token,
                 'streamType': 'broadcast',
-            })
+            }, data=b'')  # endpoint requires POST
             if not streams:
                 continue
             stream = streams.get('stream')
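
The `data=b''` in the NPO hunk is not cosmetic. As far as I can tell from yt-dlp's networking layer, the presence of a request body — even an empty one — is what switches the method from GET to POST; a sketch under that assumption:

from yt_dlp.networking import Request

assert Request('https://example.com').method == 'GET'
assert Request('https://example.com', data=b'').method == 'POST'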
yt_dlp/extractor/ntvde.py
@@ -1,21 +1,21 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     int_or_none,
     js_to_json,
-    parse_duration,
+    url_or_none,
 )
+from ..utils.traversal import traverse_obj
 
 
 class NTVDeIE(InfoExtractor):
     IE_NAME = 'n-tv.de'
-    _VALID_URL = r'https?://(?:www\.)?n-tv\.de/mediathek/videos/[^/?#]+/[^/?#]+-article(?P<id>.+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?n-tv\.de/mediathek/(?:videos|magazine)/[^/?#]+/[^/?#]+-article(?P<id>[^/?#]+)\.html'
 
     _TESTS = [{
         'url': 'http://www.n-tv.de/mediathek/videos/panorama/Schnee-und-Glaette-fuehren-zu-zahlreichen-Unfaellen-und-Staus-article14438086.html',
-        'md5': '6ef2514d4b1e8e03ca24b49e2f167153',
+        'md5': '6bcf2a6638cb83f45d5561659a1cb498',
         'info_dict': {
             'id': '14438086',
             'ext': 'mp4',
@@ -23,51 +23,61 @@ class NTVDeIE(InfoExtractor):
             'title': 'Schnee und Glätte führen zu zahlreichen Unfällen und Staus',
             'alt_title': 'Winterchaos auf deutschen Straßen',
             'description': 'Schnee und Glätte sorgen deutschlandweit für einen chaotischen Start in die Woche: Auf den Straßen kommt es zu kilometerlangen Staus und Dutzenden Glätteunfällen. In Düsseldorf und München wirbelt der Schnee zudem den Flugplan durcheinander. Dutzende Flüge landen zu spät, einige fallen ganz aus.',
-            'duration': 4020,
+            'duration': 67,
             'timestamp': 1422892797,
             'upload_date': '20150202',
         },
+    }, {
+        'url': 'https://www.n-tv.de/mediathek/magazine/auslandsreport/Juedische-Siedler-wollten-Rache-die-wollten-nur-toeten-article24523089.html',
+        'md5': 'c5c6014c014ccc3359470e1d34472bfd',
+        'info_dict': {
+            'id': '24523089',
+            'ext': 'mp4',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'title': 'Jüdische Siedler "wollten Rache, die wollten nur töten"',
+            'alt_title': 'Israelische Gewalt fern von Gaza',
+            'description': 'Vier Tage nach dem Massaker der Hamas greifen jüdische Siedler das Haus einer palästinensischen Familie im Westjordanland an. Die Überlebenden berichten, sie waren unbewaffnet, die Angreifer seien nur auf "Rache und Töten" aus gewesen. Als die Toten beerdigt werden sollen, eröffnen die Siedler erneut das Feuer.',
+            'duration': 326,
+            'timestamp': 1699688294,
+            'upload_date': '20231111',
+        },
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        info = self._parse_json(self._search_regex(
-            r'(?s)ntv\.pageInfo\.article\s*=\s*(\{.*?\});', webpage, 'info'),
-            video_id, transform_source=js_to_json)
-        timestamp = int_or_none(info.get('publishedDateAsUnixTimeStamp'))
-        vdata = self._parse_json(self._search_regex(
-            r'(?s)\$\(\s*"\#player"\s*\)\s*\.data\(\s*"player",\s*(\{.*?\})\);',
-            webpage, 'player data'), video_id,
-            transform_source=lambda s: js_to_json(re.sub(r'advertising:\s*{[^}]+},', '', s)))
-        duration = parse_duration(vdata.get('duration'))
+        info = self._search_json(
+            r'article:', webpage, 'info', video_id, transform_source=js_to_json)
+
+        vdata = self._search_json(
+            r'\$\(\s*"#playerwrapper"\s*\)\s*\.data\(\s*"player",',
+            webpage, 'player data', video_id,
+            transform_source=lambda s: js_to_json(re.sub(r'ivw:[^},]+', '', s)))['setup']['source']
 
         formats = []
-        if vdata.get('video'):
+        if vdata.get('progressive'):
             formats.append({
-                'format_id': 'flash',
-                'url': 'rtmp://fms.n-tv.de/%s' % vdata['video'],
+                'format_id': 'http',
+                'url': vdata['progressive'],
             })
-        if vdata.get('videoMp4'):
-            formats.append({
-                'format_id': 'mobile',
-                'url': compat_urlparse.urljoin('http://video.n-tv.de', vdata['videoMp4']),
-                'tbr': 400,  # estimation
-            })
-        if vdata.get('videoM3u8'):
-            m3u8_url = compat_urlparse.urljoin('http://video.n-tv.de', vdata['videoM3u8'])
+        if vdata.get('hls'):
             formats.extend(self._extract_m3u8_formats(
-                m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native',
-                quality=1, m3u8_id='hls', fatal=False))
+                vdata['hls'], video_id, 'mp4', m3u8_id='hls', fatal=False))
+        if vdata.get('dash'):
+            formats.extend(self._extract_mpd_formats(vdata['dash'], video_id, fatal=False, mpd_id='dash'))
 
         return {
             'id': video_id,
-            'title': info['headline'],
-            'description': info.get('intro'),
-            'alt_title': info.get('kicker'),
-            'timestamp': timestamp,
-            'thumbnail': vdata.get('html5VideoPoster'),
-            'duration': duration,
+            **traverse_obj(info, {
+                'title': 'headline',
+                'description': 'intro',
+                'alt_title': 'kicker',
+                'timestamp': ('publishedDateAsUnixTimeStamp', {int_or_none}),
+            }),
+            **traverse_obj(vdata, {
+                'thumbnail': ('poster', {url_or_none}),
+                'duration': ('length', {int_or_none}),
+            }),
            'formats': formats,
        }
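
The rewritten n-tv.de extractor above swaps hand-rolled `_parse_json(_search_regex(...))` pairs for `_search_json`, but both paths still rely on `js_to_json` to turn the page's JavaScript object literal into parseable JSON. A standalone sketch with a hypothetical input:

import json
from yt_dlp.utils import js_to_json

js = "{title: 'Winterchaos', length: 67}"
assert json.loads(js_to_json(js)) == {'title': 'Winterchaos', 'length': 67}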
yt_dlp/extractor/nubilesporn.py
@@ -19,7 +19,7 @@ from ..utils import (
 class NubilesPornIE(InfoExtractor):
     _NETRC_MACHINE = 'nubiles-porn'
     _VALID_URL = r'''(?x)
-        https://members.nubiles-porn.com/video/watch/(?P<id>\d+)
+        https://members\.nubiles-porn\.com/video/watch/(?P<id>\d+)
         (?:/(?P<display_id>[\w\-]+-s(?P<season>\d+)e(?P<episode>\d+)))?
     '''
 
yt_dlp/extractor/oftv.py
@@ -4,7 +4,7 @@ from ..utils import traverse_obj
 
 
 class OfTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?of.tv/video/(?P<id>\w+)'
+    _VALID_URL = r'https?://(?:www\.)?of\.tv/video/(?P<id>\w+)'
     _TESTS = [{
         'url': 'https://of.tv/video/627d7d95b353db0001dadd1a',
         'md5': 'cb9cd5db3bb9ee0d32bfd7e373d6ef0a',
@@ -34,7 +34,7 @@ class OfTVIE(InfoExtractor):
 
 
 class OfTVPlaylistIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?of.tv/creators/(?P<id>[a-zA-Z0-9-]+)/.?'
+    _VALID_URL = r'https?://(?:www\.)?of\.tv/creators/(?P<id>[a-zA-Z0-9-]+)/?(?:$|[?#])'
     _TESTS = [{
         'url': 'https://of.tv/creators/this-is-fire/',
         'playlist_count': 8,
yt_dlp/extractor/ondemandkorea.py
@@ -1,87 +1,167 @@
+import functools
 import re
+import uuid
 
 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    js_to_json,
+    OnDemandPagedList,
+    float_or_none,
+    int_or_none,
+    join_nonempty,
+    parse_age_limit,
+    parse_qs,
+    unified_strdate,
+    url_or_none,
 )
+from ..utils.traversal import traverse_obj
 
 
 class OnDemandKoreaIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?:en/)?player/vod/[a-z0-9-]+\?(?:[^#]+&)?contentId=(?P<id>\d+)'
     _GEO_COUNTRIES = ['US', 'CA']
 
     _TESTS = [{
-        'url': 'https://www.ondemandkorea.com/ask-us-anything-e351.html',
+        'url': 'https://www.ondemandkorea.com/player/vod/ask-us-anything?contentId=686471',
+        'md5': 'e2ff77255d989e3135bde0c5889fbce8',
         'info_dict': {
-            'id': 'ask-us-anything-e351',
+            'id': '686471',
             'ext': 'mp4',
-            'title': 'Ask Us Anything : Jung Sung-ho, Park Seul-gi, Kim Bo-min, Yang Seung-won - 09/24/2022',
-            'description': 'A talk show/game show with a school theme where celebrity guests appear as “transfer students.”',
-            'thumbnail': r're:^https?://.*\.jpg$',
+            'title': 'Ask Us Anything: Jung Sung-ho, Park Seul-gi, Kim Bo-min, Yang Seung-won',
+            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
+            'duration': 5486.955,
+            'release_date': '20220924',
+            'series': 'Ask Us Anything',
+            'series_id': 11790,
+            'episode_number': 351,
+            'episode': 'Jung Sung-ho, Park Seul-gi, Kim Bo-min, Yang Seung-won',
         },
-        'params': {
-            'skip_download': 'm3u8 download'
-        }
     }, {
-        'url': 'https://www.ondemandkorea.com/work-later-drink-now-e1.html',
+        'url': 'https://www.ondemandkorea.com/player/vod/breakup-probation-a-week?contentId=1595796',
+        'md5': '57266c720006962be7ff415b24775caa',
         'info_dict': {
-            'id': 'work-later-drink-now-e1',
+            'id': '1595796',
             'ext': 'mp4',
-            'title': 'Work Later, Drink Now : E01',
-            'description': 'Work Later, Drink First follows three women who find solace in a glass of liquor at the end of the day. So-hee, who gets comfort from a cup of soju af',
-            'thumbnail': r're:^https?://.*\.png$',
-            'subtitles': {
-                'English': 'mincount:1',
-            },
+            'title': 'Breakup Probation, A Week: E08',
+            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
+            'duration': 1586.0,
+            'release_date': '20231001',
+            'series': 'Breakup Probation, A Week',
+            'series_id': 22912,
+            'episode_number': 8,
+            'episode': 'E08',
         },
-        'params': {
-            'skip_download': 'm3u8 download'
-        }
+    }, {
+        'url': 'https://www.ondemandkorea.com/player/vod/the-outlaws?contentId=369531',
+        'md5': 'fa5523b87aa1f6d74fc622a97f2b47cd',
+        'info_dict': {
+            'id': '369531',
+            'ext': 'mp4',
+            'release_date': '20220519',
+            'duration': 7267.0,
+            'title': 'The Outlaws: Main Movie',
+            'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)',
+            'age_limit': 18,
+        },
+    }, {
+        'url': 'https://www.ondemandkorea.com/en/player/vod/capture-the-moment-how-is-that-possible?contentId=1605006',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id, fatal=False)
 
-        if not webpage:
-            # Page sometimes returns captcha page with HTTP 403
-            raise ExtractorError(
-                'Unable to access page. You may have been blocked.',
-                expected=True)
+        data = self._download_json(
+            f'https://odkmedia.io/odx/api/v3/playback/{video_id}/', video_id, fatal=False,
+            headers={'service-name': 'odk'}, query={'did': str(uuid.uuid4())}, expected_status=(403, 404))
+        if not traverse_obj(data, ('result', {dict})):
+            msg = traverse_obj(data, ('messages', '__default'), 'title', expected_type=str)
+            raise ExtractorError(msg or 'Got empty response from playback API', expected=True)
 
-        if 'msg_block_01.png' in webpage:
-            self.raise_geo_restricted(
-                msg='This content is not available in your region',
-                countries=self._GEO_COUNTRIES)
+        data = data['result']
 
-        if 'This video is only available to ODK PLUS members.' in webpage:
-            raise ExtractorError(
-                'This video is only available to ODK PLUS members.',
-                expected=True)
+        def try_geo_bypass(url):
+            return traverse_obj(url, ({parse_qs}, 'stream_url', 0, {url_or_none})) or url
 
-        if 'ODK PREMIUM Members Only' in webpage:
-            raise ExtractorError(
-                'This video is only available to ODK PREMIUM members.',
-                expected=True)
+        def try_upgrade_quality(url):
+            mod_url = re.sub(r'_720(p?)\.m3u8', r'_1080\1.m3u8', url)
+            return mod_url if mod_url != url and self._request_webpage(
+                HEADRequest(mod_url), video_id, note='Checking for higher quality format',
+                errnote='No higher quality format found', fatal=False) else url
 
-        title = self._search_regex(
-            r'class=["\']episode_title["\'][^>]*>([^<]+)',
-            webpage, 'episode_title', fatal=False) or self._og_search_title(webpage)
+        formats = []
+        for m3u8_url in traverse_obj(data, (('sources', 'manifest'), ..., 'url', {url_or_none}, {try_geo_bypass})):
+            formats.extend(self._extract_m3u8_formats(try_upgrade_quality(m3u8_url), video_id, fatal=False))
 
-        jw_config = self._parse_json(
-            self._search_regex((
-                r'(?P<options>{\s*[\'"]tracks[\'"].*?})[)\];]+$',
-                r'playlist\s*=\s*\[(?P<options>.+)];?$',
-                r'odkPlayer\.init.*?(?P<options>{[^;]+}).*?;',
-            ), webpage, 'jw config', flags=re.MULTILINE | re.DOTALL, group='options'),
-            video_id, transform_source=js_to_json)
-        info = self._parse_jwplayer_data(
-            jw_config, video_id, require_title=False, m3u8_id='hls',
-            base_url=url)
+        subtitles = {}
+        for track in traverse_obj(data, ('text_tracks', lambda _, v: url_or_none(v['url']))):
+            subtitles.setdefault(track.get('language', 'und'), []).append({
+                'url': track['url'],
+                'ext': track.get('codec'),
+                'name': track.get('label'),
+            })
 
-        info.update({
-            'title': title,
-            'description': self._og_search_description(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage)
-        })
-        return info
+        def if_series(key=None):
+            return lambda obj: obj[key] if key and obj['kind'] == 'series' else None
+
+        return {
+            'id': video_id,
+            'title': join_nonempty(
+                ('episode', 'program', 'title'),
+                ('episode', 'title'), from_dict=data, delim=': '),
+            **traverse_obj(data, {
+                'thumbnail': ('episode', 'images', 'thumbnail', {url_or_none}),
+                'release_date': ('episode', 'release_date', {lambda x: x.replace('-', '')}, {unified_strdate}),
+                'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
+                'age_limit': ('age_rating', 'name', {lambda x: x.replace('R', '')}, {parse_age_limit}),
+                'series': ('episode', {if_series(key='program')}, 'title'),
+                'series_id': ('episode', {if_series(key='program')}, 'id'),
+                'episode': ('episode', {if_series(key='title')}),
+                'episode_number': ('episode', {if_series(key='number')}, {int_or_none}),
+            }, get_all=False),
+            'formats': formats,
+            'subtitles': subtitles,
+        }
+
+
+class OnDemandKoreaProgramIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?:en/)?player/vod/(?P<id>[a-z0-9-]+)(?:$|#)'
+    _GEO_COUNTRIES = ['US', 'CA']
+
+    _TESTS = [{
+        'url': 'https://www.ondemandkorea.com/player/vod/uskn-news',
+        'info_dict': {
+            'id': 'uskn-news',
+        },
+        'playlist_mincount': 755,
+    }, {
+        'url': 'https://www.ondemandkorea.com/en/player/vod/the-land',
+        'info_dict': {
+            'id': 'the-land',
+        },
+        'playlist_count': 52,
+    }]
+
+    _PAGE_SIZE = 100
+
+    def _fetch_page(self, display_id, page):
+        page += 1
+        page_data = self._download_json(
+            f'https://odkmedia.io/odx/api/v3/program/{display_id}/episodes/', display_id,
+            headers={'service-name': 'odk'}, query={
+                'page': page,
+                'page_size': self._PAGE_SIZE,
+            }, note=f'Downloading page {page}', expected_status=404)
+        for episode in traverse_obj(page_data, ('result', 'results', ...)):
+            yield self.url_result(
+                f'https://www.ondemandkorea.com/player/vod/{display_id}?contentId={episode["id"]}',
+                ie=OnDemandKoreaIE, video_title=episode.get('title'))
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        entries = OnDemandPagedList(functools.partial(
+            self._fetch_page, display_id), self._PAGE_SIZE)
+
+        return self.playlist_result(entries, display_id)
yt_dlp/extractor/orf.py

@@ -4,15 +4,16 @@ import re
 from .common import InfoExtractor
 from ..networking import HEADRequest
 from ..utils import (
+    InAdvancePagedList,
     clean_html,
     determine_ext,
     float_or_none,
-    InAdvancePagedList,
     int_or_none,
     join_nonempty,
+    make_archive_id,
+    mimetype2ext,
     orderedSet,
     remove_end,
-    make_archive_id,
     smuggle_url,
     strip_jsonp,
     try_call,
@@ -21,6 +22,7 @@ from ..utils import (
     unsmuggle_url,
     url_or_none,
 )
+from ..utils.traversal import traverse_obj


 class ORFTVthekIE(InfoExtractor):
@@ -334,6 +336,45 @@ class ORFRadioIE(InfoExtractor):
             self._entries(data, station or station2), show_id, data.get('title'), clean_html(data.get('subtitle')))


+class ORFPodcastIE(InfoExtractor):
+    IE_NAME = 'orf:podcast'
+    _STATION_RE = '|'.join(map(re.escape, (
+        'bgl', 'fm4', 'ktn', 'noe', 'oe1', 'oe3',
+        'ooe', 'sbg', 'stm', 'tir', 'tv', 'vbg', 'wie')))
+    _VALID_URL = rf'https?://sound\.orf\.at/podcast/(?P<station>{_STATION_RE})/(?P<show>[\w-]+)/(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://sound.orf.at/podcast/oe3/fruehstueck-bei-mir/nicolas-stockhammer-15102023',
+        'md5': '526a5700e03d271a1505386a8721ab9b',
+        'info_dict': {
+            'id': 'nicolas-stockhammer-15102023',
+            'ext': 'mp3',
+            'title': 'Nicolas Stockhammer (15.10.2023)',
+            'duration': 3396.0,
+            'series': 'Frühstück bei mir',
+        },
+        'skip': 'ORF podcasts are only available for a limited time'
+    }]
+
+    def _real_extract(self, url):
+        station, show, show_id = self._match_valid_url(url).group('station', 'show', 'id')
+        data = self._download_json(
+            f'https://audioapi.orf.at/radiothek/api/2.0/podcast/{station}/{show}/{show_id}', show_id)
+
+        return {
+            'id': show_id,
+            'ext': 'mp3',
+            'vcodec': 'none',
+            **traverse_obj(data, ('payload', {
+                'url': ('enclosures', 0, 'url'),
+                'ext': ('enclosures', 0, 'type', {mimetype2ext}),
+                'title': 'title',
+                'description': ('description', {clean_html}),
+                'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
+                'series': ('podcast', 'title'),
+            })),
+        }
+
+
 class ORFIPTVIE(InfoExtractor):
     IE_NAME = 'orf:iptv'
     IE_DESC = 'iptv.ORF.at'
yt_dlp/extractor/periscope.py

@@ -4,6 +4,7 @@ from ..utils import (
     parse_iso8601,
     unescapeHTML,
 )
+from ..utils.traversal import traverse_obj


 class PeriscopeBaseIE(InfoExtractor):
@@ -20,22 +21,25 @@ class PeriscopeBaseIE(InfoExtractor):
         title = broadcast.get('status') or 'Periscope Broadcast'
         uploader = broadcast.get('user_display_name') or broadcast.get('username')
         title = '%s - %s' % (uploader, title) if uploader else title
-        is_live = broadcast.get('state').lower() == 'running'

         thumbnails = [{
             'url': broadcast[image],
-        } for image in ('image_url', 'image_url_small') if broadcast.get(image)]
+        } for image in ('image_url', 'image_url_medium', 'image_url_small') if broadcast.get(image)]

         return {
             'id': broadcast.get('id') or video_id,
             'title': title,
-            'timestamp': parse_iso8601(broadcast.get('created_at')),
+            'timestamp': parse_iso8601(broadcast.get('created_at')) or int_or_none(
+                broadcast.get('created_at_ms'), scale=1000),
+            'release_timestamp': int_or_none(broadcast.get('scheduled_start_ms'), scale=1000),
             'uploader': uploader,
             'uploader_id': broadcast.get('user_id') or broadcast.get('username'),
             'thumbnails': thumbnails,
             'view_count': int_or_none(broadcast.get('total_watched')),
             'tags': broadcast.get('tags'),
-            'is_live': is_live,
+            'live_status': {
+                'running': 'is_live',
+                'not_started': 'is_upcoming',
+            }.get(traverse_obj(broadcast, ('state', {str.lower}))) or 'was_live'
         }

     @staticmethod
yt_dlp/extractor/polskieradio.py

@@ -262,14 +262,14 @@ class PolskieRadioAuditionIE(InfoExtractor):
             query=query, headers={'x-api-key': '9bf6c5a2-a7d0-4980-9ed7-a3f7291f2a81'})

     def _entries(self, playlist_id, has_episodes, has_articles):
-        for i in itertools.count(1) if has_episodes else []:
+        for i in itertools.count(0) if has_episodes else []:
             page = self._call_lp3(
                 'AudioArticle/GetListByCategoryId', {
                     'categoryId': playlist_id,
                     'PageSize': 10,
                     'skip': i,
                     'format': 400,
-                }, playlist_id, f'Downloading episode list page {i}')
+                }, playlist_id, f'Downloading episode list page {i + 1}')
             if not traverse_obj(page, 'data'):
                 break
             for episode in page['data']:
@@ -281,14 +281,14 @@ class PolskieRadioAuditionIE(InfoExtractor):
                     'timestamp': parse_iso8601(episode.get('datePublic')),
                 }

-        for i in itertools.count(1) if has_articles else []:
+        for i in itertools.count(0) if has_articles else []:
             page = self._call_lp3(
                 'Article/GetListByCategoryId', {
                     'categoryId': playlist_id,
                     'PageSize': 9,
                     'skip': i,
                     'format': 400,
-                }, playlist_id, f'Downloading article list page {i}')
+                }, playlist_id, f'Downloading article list page {i + 1}')
             if not traverse_obj(page, 'data'):
                 break
             for article in page['data']:
yt_dlp/extractor/qdance.py

@@ -15,7 +15,7 @@ from ..utils import (

 class QDanceIE(InfoExtractor):
     _NETRC_MACHINE = 'qdance'
-    _VALID_URL = r'https?://(?:www\.)?q-dance\.com/network/(?:library|live)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?q-dance\.com/network/(?:library|live)/(?P<id>[\w-]+)'
     _TESTS = [{
         'note': 'vod',
         'url': 'https://www.q-dance.com/network/library/146542138',
@@ -53,6 +53,27 @@ class QDanceIE(InfoExtractor):
             'channel_id': 'qdancenetwork.video_149170353',
         },
         'skip': 'Completed livestream',
+    }, {
+        'note': 'vod with alphanumeric id',
+        'url': 'https://www.q-dance.com/network/library/WhDleSIWSfeT3Q9ObBKBeA',
+        'info_dict': {
+            'id': 'WhDleSIWSfeT3Q9ObBKBeA',
+            'ext': 'mp4',
+            'title': 'Aftershock I Defqon.1 Weekend Festival 2023 I Sunday I BLUE',
+            'display_id': 'naam-i-defqon-1-weekend-festival-2023-i-dag-i-podium',
+            'description': 'Relive Defqon.1 Path of the Warrior with Aftershock at the BLUE 🔥',
+            'series': 'Defqon.1',
+            'series_id': '31840378',
+            'season': 'Defqon.1 Weekend Festival 2023',
+            'season_id': '141735599',
+            'duration': 3507,
+            'availability': 'premium_only',
+            'thumbnail': 'https://images.q-dance.network/1698158361-230625-135716-defqon-1-aftershock.jpg',
+        },
+        'params': {'skip_download': 'm3u8'},
+    }, {
+        'url': 'https://www.q-dance.com/network/library/-uRFKXwmRZGVnve7av9uqA',
+        'only_matching': True,
     }]

     _access_token = None
yt_dlp/extractor/radiko.py

@@ -154,7 +154,7 @@ class RadikoBaseIE(InfoExtractor):
                 sf['preference'] = -100
                 sf['format_note'] = 'not preferred'
             if not is_onair and timefree_int == 1 and time_to_skip:
-                sf['downloader_options'] = {'ffmpeg_args': ['-ss', time_to_skip]}
+                sf['downloader_options'] = {'ffmpeg_args': ['-ss', str(time_to_skip)]}
             formats.extend(subformats)

         return formats
yt_dlp/extractor/radiocomercial.py (new file, 150 lines)

@@ -0,0 +1,150 @@
+import itertools
+
+from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
+from ..utils import (
+    ExtractorError,
+    extract_attributes,
+    get_element_by_class,
+    get_element_html_by_class,
+    get_element_text_and_html_by_tag,
+    get_elements_html_by_class,
+    int_or_none,
+    join_nonempty,
+    try_call,
+    unified_strdate,
+    update_url,
+    urljoin
+)
+from ..utils.traversal import traverse_obj
+
+
+class RadioComercialIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?radiocomercial\.pt/podcasts/[^/?#]+/t?(?P<season>\d+)/(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://radiocomercial.pt/podcasts/o-homem-que-mordeu-o-cao/t6/taylor-swift-entranhando-se-que-nem-uma-espada-no-ventre-dos-fas#page-content-wrapper',
+        'md5': '5f4fe8e485b29d2e8fd495605bc2c7e4',
+        'info_dict': {
+            'id': 'taylor-swift-entranhando-se-que-nem-uma-espada-no-ventre-dos-fas',
+            'ext': 'mp3',
+            'title': 'Taylor Swift entranhando-se que nem uma espada no ventre dos fãs.',
+            'release_date': '20231025',
+            'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg',
+            'season': 6
+        }
+    }, {
+        'url': 'https://radiocomercial.pt/podcasts/convenca-me-num-minuto/t3/convenca-me-num-minuto-que-os-lobisomens-existem',
+        'md5': '47e96c273aef96a8eb160cd6cf46d782',
+        'info_dict': {
+            'id': 'convenca-me-num-minuto-que-os-lobisomens-existem',
+            'ext': 'mp3',
+            'title': 'Convença-me num minuto que os lobisomens existem',
+            'release_date': '20231026',
+            'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg',
+            'season': 3
+        }
+    }, {
+        'url': 'https://radiocomercial.pt/podcasts/inacreditavel-by-ines-castel-branco/t2/o-desastre-de-aviao',
+        'md5': '69be64255420fec23b7259955d771e54',
+        'info_dict': {
+            'id': 'o-desastre-de-aviao',
+            'ext': 'mp3',
+            'title': 'O desastre de avião',
+            'description': 'md5:8a82beeb372641614772baab7246245f',
+            'release_date': '20231101',
+            'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg',
+            'season': 2
+        },
+        'params': {
+            # inconsistant md5
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://radiocomercial.pt/podcasts/tnt-todos-no-top/2023/t-n-t-29-de-outubro',
+        'md5': '91d32d4d4b1407272068b102730fc9fa',
+        'info_dict': {
+            'id': 't-n-t-29-de-outubro',
+            'ext': 'mp3',
+            'title': 'T.N.T 29 de outubro',
+            'release_date': '20231029',
+            'thumbnail': r're:https://radiocomercial.pt/upload/[^.]+.jpg',
+            'season': 2023
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id, season = self._match_valid_url(url).group('id', 'season')
+        webpage = self._download_webpage(url, video_id)
+        return {
+            'id': video_id,
+            'title': self._html_extract_title(webpage),
+            'description': self._og_search_description(webpage, default=None),
+            'release_date': unified_strdate(get_element_by_class(
+                'date', get_element_html_by_class('descriptions', webpage) or '')),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'season': int_or_none(season),
+            'url': extract_attributes(get_element_html_by_class('audiofile', webpage) or '').get('href'),
+        }
+
+
+class RadioComercialPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?radiocomercial\.pt/podcasts/(?P<id>[\w-]+)(?:/t?(?P<season>\d+))?/?(?:$|[?#])'
+    _TESTS = [{
+        'url': 'https://radiocomercial.pt/podcasts/convenca-me-num-minuto/t3',
+        'info_dict': {
+            'id': 'convenca-me-num-minuto_t3',
+            'title': 'Convença-me num Minuto - Temporada 3',
+        },
+        'playlist_mincount': 32
+    }, {
+        'url': 'https://radiocomercial.pt/podcasts/o-homem-que-mordeu-o-cao',
+        'info_dict': {
+            'id': 'o-homem-que-mordeu-o-cao',
+            'title': 'O Homem Que Mordeu o Cão',
+        },
+        'playlist_mincount': 19
+    }, {
+        'url': 'https://radiocomercial.pt/podcasts/as-minhas-coisas-favoritas',
+        'info_dict': {
+            'id': 'as-minhas-coisas-favoritas',
+            'title': 'As Minhas Coisas Favoritas',
+        },
+        'playlist_mincount': 131
+    }, {
+        'url': 'https://radiocomercial.pt/podcasts/tnt-todos-no-top/t2023',
+        'info_dict': {
+            'id': 'tnt-todos-no-top_t2023',
+            'title': 'TNT - Todos No Top - Temporada 2023',
+        },
+        'playlist_mincount': 39
+    }]
+
+    def _entries(self, url, playlist_id):
+        for page in itertools.count(1):
+            try:
+                webpage = self._download_webpage(
+                    f'{url}/{page}', playlist_id, f'Downloading page {page}')
+            except ExtractorError as e:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 404:
+                    break
+                raise
+
+            episodes = get_elements_html_by_class('tm-ouvir-podcast', webpage)
+            if not episodes:
+                break
+            for url_path in traverse_obj(episodes, (..., {extract_attributes}, 'href')):
+                episode_url = urljoin(url, url_path)
+                if RadioComercialIE.suitable(episode_url):
+                    yield episode_url
+
+    def _real_extract(self, url):
+        podcast, season = self._match_valid_url(url).group('id', 'season')
+        playlist_id = join_nonempty(podcast, season, delim='_t')
+        url = update_url(url, query=None, fragment=None)
+        webpage = self._download_webpage(url, playlist_id)
+
+        name = try_call(lambda: get_element_text_and_html_by_tag('h1', webpage)[0])
+        title = name if name == season else join_nonempty(name, season, delim=' - Temporada ')
+
+        return self.playlist_from_matches(
+            self._entries(url, playlist_id), playlist_id, title, ie=RadioComercialIE)
yt_dlp/extractor/redtube.py

@@ -39,7 +39,7 @@ class RedTubeIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(
-            'http://www.redtube.com/%s' % video_id, video_id)
+            f'https://www.redtube.com/{video_id}', video_id)

         ERRORS = (
             (('video-deleted-info', '>This video has been removed'), 'has been removed'),
yt_dlp/extractor/sbscokr.py (new file, 200 lines)

@@ -0,0 +1,200 @@
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    int_or_none,
+    parse_iso8601,
+    parse_resolution,
+    url_or_none,
+)
+from ..utils.traversal import traverse_obj
+
+
+class SBSCoKrIE(InfoExtractor):
+    IE_NAME = 'sbs.co.kr'
+    _VALID_URL = [r'https?://allvod\.sbs\.co\.kr/allvod/vod(?:Package)?EndPage\.do\?(?:[^#]+&)?mdaId=(?P<id>\d+)',
+                  r'https?://programs\.sbs\.co\.kr/(?:enter|drama|culture|sports|plus|mtv|kth)/[a-z0-9]+/(?:vod|clip|movie)/\d+/(?P<id>(?:OC)?\d+)']
+
+    _TESTS = [{
+        'url': 'https://programs.sbs.co.kr/enter/dongsang2/clip/52007/OC467706746?div=main_pop_clip',
+        'md5': 'c3f6d45e1fb5682039d94cda23c36f19',
+        'info_dict': {
+            'id': 'OC467706746',
+            'ext': 'mp4',
+            'title': '‘아슬아슬’ 박군♥한영의 새 집 인테리어 대첩♨',
+            'description': 'md5:6a71eb1979ee4a94ea380310068ccab4',
+            'thumbnail': 'https://img2.sbs.co.kr/ops_clip_img/2023/10/10/34c4c0f9-a9a5-4ff6-a92e-9bb4b5f6fa65915w1280.jpg',
+            'release_timestamp': 1696889400,
+            'release_date': '20231009',
+            'view_count': int,
+            'like_count': int,
+            'duration': 238,
+            'age_limit': 15,
+            'series': '동상이몽2_너는 내 운명',
+            'episode': '레이디제인, ‘혼전임신설’ ‘3개월’ 앞당긴 결혼식 비하인드 스토리 최초 공개!',
+            'episode_number': 311,
+        },
+    }, {
+        'url': 'https://allvod.sbs.co.kr/allvod/vodPackageEndPage.do?mdaId=22000489324&combiId=PA000000284&packageType=A&isFreeYN=',
+        'md5': 'bf46b2e89fda7ae7de01f5743cef7236',
+        'info_dict': {
+            'id': '22000489324',
+            'ext': 'mp4',
+            'title': '[다시보기] 트롤리 15회',
+            'description': 'md5:0e55d74bef1ac55c61ae90c73ac485f4',
+            'thumbnail': 'https://img2.sbs.co.kr/img/sbs_cms/WE/2023/02/14/arC1676333794938-1280-720.jpg',
+            'release_timestamp': 1676325600,
+            'release_date': '20230213',
+            'view_count': int,
+            'like_count': int,
+            'duration': 5931,
+            'age_limit': 15,
+            'series': '트롤리',
+            'episode': '이거 다 거짓말이야',
+            'episode_number': 15,
+        },
+    }, {
+        'url': 'https://programs.sbs.co.kr/enter/fourman/vod/69625/22000508948',
+        'md5': '41e8ae4cc6c8424f4e4d76661a4becbf',
+        'info_dict': {
+            'id': '22000508948',
+            'ext': 'mp4',
+            'title': '[다시보기] 신발 벗고 돌싱포맨 104회',
+            'description': 'md5:c6a247383c4dd661e4b956bf4d3b586e',
+            'thumbnail': 'https://img2.sbs.co.kr/img/sbs_cms/WE/2023/08/30/2vb1693355446261-1280-720.jpg',
+            'release_timestamp': 1693342800,
+            'release_date': '20230829',
+            'view_count': int,
+            'like_count': int,
+            'duration': 7036,
+            'age_limit': 15,
+            'series': '신발 벗고 돌싱포맨',
+            'episode': '돌싱포맨 저격수들 등장!',
+            'episode_number': 104,
+        },
+    }]
+
+    def _call_api(self, video_id, rscuse=''):
+        return self._download_json(
+            f'https://api.play.sbs.co.kr/1.0/sbs_vodall/{video_id}', video_id,
+            note=f'Downloading m3u8 information {rscuse}',
+            query={
+                'platform': 'pcweb',
+                'protocol': 'download',
+                'absolute_show': 'Y',
+                'service': 'program',
+                'ssl': 'Y',
+                'rscuse': rscuse,
+            })
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        details = self._call_api(video_id)
+        source = traverse_obj(details, ('vod', 'source', 'mediasource', {dict})) or {}
+
+        formats = []
+        for stream in traverse_obj(details, (
+                'vod', 'source', 'mediasourcelist', lambda _, v: v['mediaurl'] or v['mediarscuse']
+        ), default=[source]):
+            if not stream.get('mediaurl'):
+                new_source = traverse_obj(
+                    self._call_api(video_id, rscuse=stream['mediarscuse']),
+                    ('vod', 'source', 'mediasource', {dict})) or {}
+                if new_source.get('mediarscuse') == source.get('mediarscuse') or not new_source.get('mediaurl'):
+                    continue
+                stream = new_source
+            formats.append({
+                'url': stream['mediaurl'],
+                'format_id': stream.get('mediarscuse'),
+                'format_note': stream.get('medianame'),
+                **parse_resolution(stream.get('quality')),
+                'preference': int_or_none(stream.get('mediarscuse'))
+            })

+        caption_url = traverse_obj(details, ('vod', 'source', 'subtitle', {url_or_none}))
+
+        return {
+            'id': video_id,
+            **traverse_obj(details, ('vod', {
+                'title': ('info', 'title'),
+                'duration': ('info', 'duration', {int_or_none}),
+                'view_count': ('info', 'viewcount', {int_or_none}),
+                'like_count': ('info', 'likecount', {int_or_none}),
+                'description': ('info', 'synopsis', {clean_html}),
+                'episode': ('info', 'content', ('contenttitle', 'title')),
+                'episode_number': ('info', 'content', 'number', {int_or_none}),
+                'series': ('info', 'program', 'programtitle'),
+                'age_limit': ('info', 'targetage', {int_or_none}),
+                'release_timestamp': ('info', 'broaddate', {parse_iso8601}),
+                'thumbnail': ('source', 'thumbnail', 'origin', {url_or_none}),
+            }), get_all=False),
+            'formats': formats,
+            'subtitles': {'ko': [{'url': caption_url}]} if caption_url else None,
+        }
+
+
+class SBSCoKrAllvodProgramIE(InfoExtractor):
+    IE_NAME = 'sbs.co.kr:allvod_program'
+    _VALID_URL = r'https?://allvod\.sbs\.co\.kr/allvod/vod(?:Free)?ProgramDetail\.do\?(?:[^#]+&)?pgmId=(?P<id>P?\d+)'
+
+    _TESTS = [{
+        'url': 'https://allvod.sbs.co.kr/allvod/vodFreeProgramDetail.do?type=legend&pgmId=22000010159&listOrder=vodCntAsc',
+        'info_dict': {
+            '_type': 'playlist',
+            'id': '22000010159',
+        },
+        'playlist_count': 18,
+    }, {
+        'url': 'https://allvod.sbs.co.kr/allvod/vodProgramDetail.do?pgmId=P460810577',
+        'info_dict': {
+            '_type': 'playlist',
+            'id': 'P460810577',
+        },
+        'playlist_count': 13,
+    }]
+
+    def _real_extract(self, url):
+        program_id = self._match_id(url)
+
+        details = self._download_json(
+            'https://allvod.sbs.co.kr/allvod/vodProgramDetail/vodProgramDetailAjax.do',
+            program_id, note='Downloading program details',
+            query={
+                'pgmId': program_id,
+                'currentCount': '10000',
+            })
+
+        return self.playlist_result(
+            [self.url_result(f'https://allvod.sbs.co.kr/allvod/vodEndPage.do?mdaId={video_id}', SBSCoKrIE)
+             for video_id in traverse_obj(details, ('list', ..., 'mdaId'))], program_id)
+
+
+class SBSCoKrProgramsVodIE(InfoExtractor):
+    IE_NAME = 'sbs.co.kr:programs_vod'
+    _VALID_URL = r'https?://programs\.sbs\.co\.kr/(?:enter|drama|culture|sports|plus|mtv)/(?P<id>[a-z0-9]+)/vods'
+
+    _TESTS = [{
+        'url': 'https://programs.sbs.co.kr/culture/morningwide/vods/65007',
+        'info_dict': {
+            '_type': 'playlist',
+            'id': '00000210215',
+        },
+        'playlist_mincount': 9782,
+    }, {
+        'url': 'https://programs.sbs.co.kr/enter/dongsang2/vods/52006',
+        'info_dict': {
+            '_type': 'playlist',
+            'id': '22000010476',
+        },
+        'playlist_mincount': 312,
+    }]
+
+    def _real_extract(self, url):
+        program_slug = self._match_id(url)
+
+        program_id = self._download_json(
+            f'https://static.apis.sbs.co.kr/program-api/1.0/menu/{program_slug}', program_slug,
+            note='Downloading program menu data')['program']['programid']
+
+        return self.url_result(
+            f'https://allvod.sbs.co.kr/allvod/vodProgramDetail.do?pgmId={program_id}', SBSCoKrAllvodProgramIE)
yt_dlp/extractor/sina.py

@@ -11,7 +11,7 @@ from ..utils import (


 class SinaIE(InfoExtractor):
-    _VALID_URL = r'''(?x)https?://(?:.*?\.)?video\.sina\.com\.cn/
+    _VALID_URL = r'''(?x)https?://(?:[^/?#]+\.)?video\.sina\.com\.cn/
                         (?:
                             (?:view/|.*\#)(?P<id>\d+)|
                             .+?/(?P<pseudo_id>[^/?#]+)(?:\.s?html)|
yt_dlp/extractor/slideslive.py

@@ -1,5 +1,6 @@
 import re
 import urllib.parse
+import xml.etree.ElementTree

 from .common import InfoExtractor
 from ..utils import (
@@ -469,11 +470,12 @@ class SlidesLiveIE(InfoExtractor):
             slides = self._download_xml(
                 player_info['slides_xml_url'], video_id, fatal=False,
                 note='Downloading slides XML', errnote='Failed to download slides info')
-            slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
-            for slide_id, slide in enumerate(slides.findall('./slide') if slides else [], 1):
-                slides_info.append((
-                    slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
-                    int_or_none(xpath_text(slide, './timeSec', 'time'))))
+            if isinstance(slides, xml.etree.ElementTree.Element):
+                slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
+                for slide_id, slide in enumerate(slides.findall('./slide')):
+                    slides_info.append((
+                        slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
+                        int_or_none(xpath_text(slide, './timeSec', 'time'))))

         chapters, thumbnails = [], []
         if url_or_none(player_info.get('thumbnail')):
@@ -528,7 +530,7 @@ class SlidesLiveIE(InfoExtractor):
         if service_name == 'vimeo':
             info['url'] = smuggle_url(
                 f'https://player.vimeo.com/video/{service_id}',
-                {'http_headers': {'Referer': url}})
+                {'referer': url})

         video_slides = traverse_obj(slides, ('slides', ..., 'video', 'id'))
         if not video_slides:
yt_dlp/extractor/stacommu.py

@@ -38,9 +38,48 @@ class StacommuBaseIE(WrestleUniverseBaseIE):
             return None
         return traverse_obj(encryption_data, {'key': ('key', {decrypt}), 'iv': ('iv', {decrypt})})

+    def _extract_vod(self, url):
+        video_id = self._match_id(url)
+        video_info = self._download_metadata(
+            url, video_id, 'ja', ('dehydratedState', 'queries', 0, 'state', 'data'))
+        hls_info, decrypt = self._call_encrypted_api(
+            video_id, ':watch', 'stream information', data={'method': 1})
+
+        return {
+            'id': video_id,
+            'formats': self._get_formats(hls_info, ('protocolHls', 'url', {url_or_none}), video_id),
+            'hls_aes': self._extract_hls_key(hls_info, 'protocolHls', decrypt),
+            **traverse_obj(video_info, {
+                'title': ('displayName', {str}),
+                'description': ('description', {str}),
+                'timestamp': ('watchStartTime', {int_or_none}),
+                'thumbnail': ('keyVisualUrl', {url_or_none}),
+                'cast': ('casts', ..., 'displayName', {str}),
+                'duration': ('duration', {int}),
+            }),
+        }
+
+    def _extract_ppv(self, url):
+        video_id = self._match_id(url)
+        video_info = self._call_api(video_id, msg='video information', query={'al': 'ja'}, auth=False)
+        hls_info, decrypt = self._call_encrypted_api(
+            video_id, ':watchArchive', 'stream information', data={'method': 1})
+
+        return {
+            'id': video_id,
+            'formats': self._get_formats(hls_info, ('hls', 'urls', ..., {url_or_none}), video_id),
+            'hls_aes': self._extract_hls_key(hls_info, 'hls', decrypt),
+            **traverse_obj(video_info, {
+                'title': ('displayName', {str}),
+                'timestamp': ('startTime', {int_or_none}),
+                'thumbnail': ('keyVisualUrl', {url_or_none}),
+                'duration': ('duration', {int_or_none}),
+            }),
+        }
+

 class StacommuVODIE(StacommuBaseIE):
-    _VALID_URL = r'https?://www\.stacommu\.jp/videos/episodes/(?P<id>[\da-zA-Z]+)'
+    _VALID_URL = r'https?://www\.stacommu\.jp/(?:en/)?videos/episodes/(?P<id>[\da-zA-Z]+)'
     _TESTS = [{
         # not encrypted
         'url': 'https://www.stacommu.jp/videos/episodes/aXcVKjHyAENEjard61soZZ',
@@ -79,34 +118,19 @@ class StacommuVODIE(StacommuBaseIE):
         'params': {
             'skip_download': 'm3u8',
         },
+    }, {
+        'url': 'https://www.stacommu.jp/en/videos/episodes/aXcVKjHyAENEjard61soZZ',
+        'only_matching': True,
     }]

     _API_PATH = 'videoEpisodes'

     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        video_info = self._download_metadata(
-            url, video_id, 'ja', ('dehydratedState', 'queries', 0, 'state', 'data'))
-        hls_info, decrypt = self._call_encrypted_api(
-            video_id, ':watch', 'stream information', data={'method': 1})
-
-        return {
-            'id': video_id,
-            'formats': self._get_formats(hls_info, ('protocolHls', 'url', {url_or_none}), video_id),
-            'hls_aes': self._extract_hls_key(hls_info, 'protocolHls', decrypt),
-            **traverse_obj(video_info, {
-                'title': ('displayName', {str}),
-                'description': ('description', {str}),
-                'timestamp': ('watchStartTime', {int_or_none}),
-                'thumbnail': ('keyVisualUrl', {url_or_none}),
-                'cast': ('casts', ..., 'displayName', {str}),
-                'duration': ('duration', {int}),
-            }),
-        }
+        return self._extract_vod(url)


 class StacommuLiveIE(StacommuBaseIE):
-    _VALID_URL = r'https?://www\.stacommu\.jp/live/(?P<id>[\da-zA-Z]+)'
+    _VALID_URL = r'https?://www\.stacommu\.jp/(?:en/)?live/(?P<id>[\da-zA-Z]+)'
     _TESTS = [{
         'url': 'https://www.stacommu.jp/live/d2FJ3zLnndegZJCAEzGM3m',
         'info_dict': {
@@ -125,24 +149,83 @@ class StacommuLiveIE(StacommuBaseIE):
         'params': {
             'skip_download': 'm3u8',
         },
+    }, {
+        'url': 'https://www.stacommu.jp/en/live/d2FJ3zLnndegZJCAEzGM3m',
+        'only_matching': True,
     }]

     _API_PATH = 'events'

     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        video_info = self._call_api(video_id, msg='video information', query={'al': 'ja'}, auth=False)
-        hls_info, decrypt = self._call_encrypted_api(
-            video_id, ':watchArchive', 'stream information', data={'method': 1})
-
-        return {
-            'id': video_id,
-            'formats': self._get_formats(hls_info, ('hls', 'urls', ..., {url_or_none}), video_id),
-            'hls_aes': self._extract_hls_key(hls_info, 'hls', decrypt),
-            **traverse_obj(video_info, {
-                'title': ('displayName', {str}),
-                'timestamp': ('startTime', {int_or_none}),
-                'thumbnail': ('keyVisualUrl', {url_or_none}),
-                'duration': ('duration', {int_or_none}),
-            }),
-        }
+        return self._extract_ppv(url)
+
+
+class TheaterComplexTownBaseIE(StacommuBaseIE):
+    _NETRC_MACHINE = 'theatercomplextown'
+    _API_HOST = 'api.theater-complex.town'
+    _LOGIN_QUERY = {'key': 'AIzaSyAgNCqToaIz4a062EeIrkhI_xetVfAOrfc'}
+    _LOGIN_HEADERS = {
+        'Accept': '*/*',
+        'Content-Type': 'application/json',
+        'X-Client-Version': 'Chrome/JsCore/9.23.0/FirebaseCore-web',
+        'Referer': 'https://www.theater-complex.town/',
+        'Origin': 'https://www.theater-complex.town',
+    }
+
+
+class TheaterComplexTownVODIE(TheaterComplexTownBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?theater-complex\.town/(?:en/)?videos/episodes/(?P<id>\w+)'
+    IE_NAME = 'theatercomplextown:vod'
+    _TESTS = [{
+        'url': 'https://www.theater-complex.town/videos/episodes/hoxqidYNoAn7bP92DN6p78',
+        'info_dict': {
+            'id': 'hoxqidYNoAn7bP92DN6p78',
+            'ext': 'mp4',
+            'title': '演劇ドラフトグランプリ2023 劇団『恋のぼり』〜劇団名決定秘話ラジオ',
+            'description': 'md5:a7e2e9cf570379ea67fb630f345ff65d',
+            'cast': ['玉城 裕規', '石川 凌雅'],
+            'thumbnail': 'https://image.theater-complex.town/5URnXX6KCeDysuFrPkP38o/5URnXX6KCeDysuFrPkP38o',
+            'upload_date': '20231103',
+            'timestamp': 1699016400,
+            'duration': 868,
+        },
+        'params': {
+            'skip_download': 'm3u8',
+        },
+    }, {
+        'url': 'https://www.theater-complex.town/en/videos/episodes/6QT7XYwM9dJz5Gf9VB6K5y',
+        'only_matching': True,
+    }]
+
+    _API_PATH = 'videoEpisodes'
+
+    def _real_extract(self, url):
+        return self._extract_vod(url)
+
+
+class TheaterComplexTownPPVIE(TheaterComplexTownBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?theater-complex\.town/(?:en/)?ppv/(?P<id>\w+)'
+    IE_NAME = 'theatercomplextown:ppv'
+    _TESTS = [{
+        'url': 'https://www.theater-complex.town/ppv/wytW3X7khrjJBUpKuV3jen',
+        'info_dict': {
+            'id': 'wytW3X7khrjJBUpKuV3jen',
+            'ext': 'mp4',
+            'title': 'BREAK FREE STARS 11月5日(日)12:30千秋楽公演',
+            'thumbnail': 'https://image.theater-complex.town/5GWEB31JcTUfjtgdeV5t6o/5GWEB31JcTUfjtgdeV5t6o',
+            'upload_date': '20231105',
+            'timestamp': 1699155000,
+            'duration': 8378,
+        },
+        'params': {
+            'skip_download': 'm3u8',
+        },
+    }, {
+        'url': 'https://www.theater-complex.town/en/ppv/wytW3X7khrjJBUpKuV3jen',
+        'only_matching': True,
+    }]
+
+    _API_PATH = 'events'
+
+    def _real_extract(self, url):
+        return self._extract_ppv(url)
yt_dlp/extractor/storyfire.py

@@ -32,9 +32,7 @@ class StoryFireBaseIE(InfoExtractor):
             'description': video.get('description'),
             'url': smuggle_url(
                 'https://player.vimeo.com/video/' + vimeo_id, {
-                    'http_headers': {
-                        'Referer': 'https://storyfire.com/',
-                    }
+                    'referer': 'https://storyfire.com/',
                 }),
             'thumbnail': video.get('storyImage'),
             'view_count': int_or_none(video.get('views')),
yt_dlp/extractor/tenplay.py

@@ -1,9 +1,11 @@
-from datetime import datetime
 import base64
+import functools
+import itertools
+from datetime import datetime

 from .common import InfoExtractor
 from ..networking import HEADRequest
-from ..utils import int_or_none, urlencode_postdata
+from ..utils import int_or_none, traverse_obj, urlencode_postdata, urljoin


 class TenPlayIE(InfoExtractor):
@@ -113,3 +115,55 @@ class TenPlayIE(InfoExtractor):
         'uploader': 'Channel 10',
         'uploader_id': '2199827728001',
     }
+
+
+class TenPlaySeasonIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/(?P<show>[^/?#]+)/episodes/(?P<season>[^/?#]+)/?(?:$|[?#])'
+    _TESTS = [{
+        'url': 'https://10play.com.au/masterchef/episodes/season-14',
+        'info_dict': {
+            'title': 'Season 14',
+            'id': 'MjMyOTIy',
+        },
+        'playlist_mincount': 64,
+    }, {
+        'url': 'https://10play.com.au/the-bold-and-the-beautiful-fast-tracked/episodes/season-2022',
+        'info_dict': {
+            'title': 'Season 2022',
+            'id': 'Mjc0OTIw',
+        },
+        'playlist_mincount': 256,
+    }]
+
+    def _entries(self, load_more_url, display_id=None):
+        skip_ids = []
+        for page in itertools.count(1):
+            episodes_carousel = self._download_json(
+                load_more_url, display_id, query={'skipIds[]': skip_ids},
+                note=f'Fetching episodes page {page}')
+
+            episodes_chunk = episodes_carousel['items']
+            skip_ids.extend(ep['id'] for ep in episodes_chunk)
+
+            for ep in episodes_chunk:
+                yield ep['cardLink']
+            if not episodes_carousel['hasMore']:
+                break
+
+    def _real_extract(self, url):
+        show, season = self._match_valid_url(url).group('show', 'season')
+        season_info = self._download_json(
+            f'https://10play.com.au/api/shows/{show}/episodes/{season}', f'{show}/{season}')
+
+        episodes_carousel = traverse_obj(season_info, (
+            'content', 0, 'components', (
+                lambda _, v: v['title'].lower() == 'episodes',
+                (..., {dict}),
+            )), get_all=False) or {}
+
+        playlist_id = episodes_carousel['tpId']
+
+        return self.playlist_from_matches(
+            self._entries(urljoin(url, episodes_carousel['loadMoreUrl']), playlist_id),
+            playlist_id, traverse_obj(season_info, ('content', 0, 'title', {str})),
+            getter=functools.partial(urljoin, url))
yt_dlp/extractor/thisav.py (deleted file, 66 lines)

@@ -1,66 +0,0 @@
-from .common import InfoExtractor
-from ..utils import remove_end
-
-
-class ThisAVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*'
-    _TESTS = [{
-        # jwplayer
-        'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html',
-        'md5': '0480f1ef3932d901f0e0e719f188f19b',
-        'info_dict': {
-            'id': '47734',
-            'ext': 'flv',
-            'title': '高樹マリア - Just fit',
-            'uploader': 'dj7970',
-            'uploader_id': 'dj7970'
-        }
-    }, {
-        # html5 media
-        'url': 'http://www.thisav.com/video/242352/nerdy-18yo-big-ass-tattoos-and-glasses.html',
-        'md5': 'ba90c076bd0f80203679e5b60bf523ee',
-        'info_dict': {
-            'id': '242352',
-            'ext': 'mp4',
-            'title': 'Nerdy 18yo Big Ass Tattoos and Glasses',
-            'uploader': 'cybersluts',
-            'uploader_id': 'cybersluts',
-        },
-    }]
-
-    def _real_extract(self, url):
-        mobj = self._match_valid_url(url)
-
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        title = remove_end(self._html_extract_title(webpage), ' - 視頻 - ThisAV.com-世界第一中文成人娛樂網站')
-        video_url = self._html_search_regex(
-            r"addVariable\('file','([^']+)'\);", webpage, 'video url', default=None)
-        if video_url:
-            info_dict = {
-                'formats': [{
-                    'url': video_url,
-                }],
-            }
-        else:
-            entries = self._parse_html5_media_entries(url, webpage, video_id)
-            if entries:
-                info_dict = entries[0]
-            else:
-                info_dict = self._extract_jwplayer_data(
-                    webpage, video_id, require_title=False)
-        uploader = self._html_search_regex(
-            r': <a href="http://www\.thisav\.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>',
-            webpage, 'uploader name', fatal=False)
-        uploader_id = self._html_search_regex(
-            r': <a href="http://www\.thisav\.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
-            webpage, 'uploader id', fatal=False)
-
-        info_dict.update({
-            'id': video_id,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'title': title,
-        })
-
-        return info_dict
yt_dlp/extractor/thisoldhouse.py

@@ -1,11 +1,23 @@
+import json
+
 from .common import InfoExtractor
+from .zype import ZypeIE
 from ..networking import HEADRequest
+from ..networking.exceptions import HTTPError
+from ..utils import (
+    ExtractorError,
+    filter_dict,
+    parse_qs,
+    try_call,
+    urlencode_postdata,
+)


 class ThisOldHouseIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode|(?:[^/]+/)?\d+)/(?P<id>[^/?#]+)'
+    _NETRC_MACHINE = 'thisoldhouse'
+    _VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode|(?:[^/?#]+/)?\d+)/(?P<id>[^/?#]+)'
     _TESTS = [{
-        'url': 'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench',
+        'url': 'https://www.thisoldhouse.com/furniture/21017078/how-to-build-a-storage-bench',
         'info_dict': {
             'id': '5dcdddf673c3f956ef5db202',
             'ext': 'mp4',
@@ -23,13 +35,16 @@ class ThisOldHouseIE(InfoExtractor):
             'skip_download': True,
         },
     }, {
+        # Page no longer has video
         'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins',
         'only_matching': True,
     }, {
+        # 404 Not Found
        'url': 'https://www.thisoldhouse.com/tv-episode/ask-toh-shelf-rough-electric',
        'only_matching': True,
    }, {
-        'url': 'https://www.thisoldhouse.com/furniture/21017078/how-to-build-a-storage-bench',
+        # 404 Not Found
+        'url': 'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench',
         'only_matching': True,
     }, {
         'url': 'https://www.thisoldhouse.com/21113884/s41-e13-paradise-lost',
@@ -39,17 +54,51 @@ class ThisOldHouseIE(InfoExtractor):
         'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
         'only_matching': True,
     }]
-    _ZYPE_TMPL = 'https://player.zype.com/embed/%s.html?api_key=hsOk_yMSPYNrT22e9pu8hihLXjaZf0JW5jsOWv4ZqyHJFvkJn6rtToHl09tbbsbe'
+    _LOGIN_URL = 'https://login.thisoldhouse.com/usernamepassword/login'
+
+    def _perform_login(self, username, password):
+        self._request_webpage(
+            HEADRequest('https://www.thisoldhouse.com/insider'), None, 'Requesting session cookies')
+        urlh = self._request_webpage(
+            'https://www.thisoldhouse.com/wp-login.php', None, 'Requesting login info',
+            errnote='Unable to login', query={'redirect_to': 'https://www.thisoldhouse.com/insider'})
+
+        try:
+            auth_form = self._download_webpage(
+                self._LOGIN_URL, None, 'Submitting credentials', headers={
+                    'Content-Type': 'application/json',
+                    'Referer': urlh.url,
+                }, data=json.dumps(filter_dict({
+                    **{('client_id' if k == 'client' else k): v[0] for k, v in parse_qs(urlh.url).items()},
+                    'tenant': 'thisoldhouse',
+                    'username': username,
+                    'password': password,
+                    'popup_options': {},
+                    'sso': True,
+                    '_csrf': try_call(lambda: self._get_cookies(self._LOGIN_URL)['_csrf'].value),
+                    '_intstate': 'deprecated',
+                }), separators=(',', ':')).encode())
+        except ExtractorError as e:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                raise ExtractorError('Invalid username or password', expected=True)
+            raise
+
+        self._request_webpage(
+            'https://login.thisoldhouse.com/login/callback', None, 'Completing login',
+            data=urlencode_postdata(self._hidden_inputs(auth_form)))

     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         if 'To Unlock This content' in webpage:
-            self.raise_login_required(method='cookies')
-        video_url = self._search_regex(
+            self.raise_login_required(
+                'This video is only available for subscribers. '
+                'Note that --cookies-from-browser may not work due to this site using session cookies')
+
+        video_url, video_id = self._search_regex(
             r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})[^\'"]*)[\'"]',
-            webpage, 'video url')
-        if 'subscription_required=true' in video_url or 'c-entry-group-labels__image' in webpage:
-            return self.url_result(self._request_webpage(HEADRequest(video_url), display_id).url, 'Zype', display_id)
-        video_id = self._search_regex(r'(?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})', video_url, 'video id')
-        return self.url_result(self._ZYPE_TMPL % video_id, 'Zype', video_id)
+            webpage, 'video url', group=(1, 2))
+        video_url = self._request_webpage(HEADRequest(video_url), video_id, 'Resolving Zype URL').url
+        return self.url_result(video_url, ZypeIE, video_id)
yt_dlp/extractor/twitcasting.py

@@ -142,7 +142,7 @@ class TwitCastingIE(InfoExtractor):
             'https://twitcasting.tv/streamserver.php?target=%s&mode=client' % uploader_id, video_id,
             'Downloading live info', fatal=False)

-        is_live = 'data-status="online"' in webpage
+        is_live = any(f'data-{x}' in webpage for x in ['is-onlive="true"', 'live-type="live"', 'status="online"'])
         if not traverse_obj(stream_server_data, 'llfmp4') and is_live:
             self.raise_login_required(method='cookies')
yt_dlp/extractor/twitter.py

@@ -1563,7 +1563,7 @@ class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
     IE_NAME = 'twitter:broadcast'
     _VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/broadcasts/(?P<id>[0-9a-zA-Z]{13})'

-    _TEST = {
+    _TESTS = [{
         # untitled Periscope video
         'url': 'https://twitter.com/i/broadcasts/1yNGaQLWpejGj',
         'info_dict': {
@@ -1571,11 +1571,42 @@ class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
             'ext': 'mp4',
             'title': 'Andrea May Sahouri - Periscope Broadcast',
             'uploader': 'Andrea May Sahouri',
-            'uploader_id': '1PXEdBZWpGwKe',
+            'uploader_id': 'andreamsahouri',
+            'uploader_url': 'https://twitter.com/andreamsahouri',
+            'timestamp': 1590973638,
+            'upload_date': '20200601',
             'thumbnail': r're:^https?://[^?#]+\.jpg\?token=',
             'view_count': int,
         },
-    }
+    }, {
+        'url': 'https://twitter.com/i/broadcasts/1ZkKzeyrPbaxv',
+        'info_dict': {
+            'id': '1ZkKzeyrPbaxv',
+            'ext': 'mp4',
+            'title': 'Starship | SN10 | High-Altitude Flight Test',
+            'uploader': 'SpaceX',
+            'uploader_id': 'SpaceX',
+            'uploader_url': 'https://twitter.com/SpaceX',
+            'timestamp': 1614812942,
+            'upload_date': '20210303',
+            'thumbnail': r're:^https?://[^?#]+\.jpg\?token=',
+            'view_count': int,
+        },
+    }, {
+        'url': 'https://twitter.com/i/broadcasts/1OyKAVQrgzwGb',
+        'info_dict': {
+            'id': '1OyKAVQrgzwGb',
+            'ext': 'mp4',
+            'title': 'Starship Flight Test',
+            'uploader': 'SpaceX',
+            'uploader_id': 'SpaceX',
+            'uploader_url': 'https://twitter.com/SpaceX',
+            'timestamp': 1681993964,
+            'upload_date': '20230420',
+            'thumbnail': r're:^https?://[^?#]+\.jpg\?token=',
+            'view_count': int,
+        },
+    }]

     def _real_extract(self, url):
         broadcast_id = self._match_id(url)
@@ -1585,6 +1616,12 @@ class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
         if not broadcast:
             raise ExtractorError('Broadcast no longer exists', expected=True)
         info = self._parse_broadcast_data(broadcast, broadcast_id)
+        info['title'] = broadcast.get('status') or info.get('title')
+        info['uploader_id'] = broadcast.get('twitter_username') or info.get('uploader_id')
+        info['uploader_url'] = format_field(broadcast, 'twitter_username', 'https://twitter.com/%s', default=None)
+        if info['live_status'] == 'is_upcoming':
+            return info
+
         media_key = broadcast['media_key']
         source = self._call_api(
             f'live_video_stream/status/{media_key}', media_key)['source']
@@ -1741,7 +1778,7 @@ class TwitterSpacesIE(TwitterBaseIE):

 class TwitterShortenerIE(TwitterBaseIE):
     IE_NAME = 'twitter:shortener'
-    _VALID_URL = r'https?://t.co/(?P<id>[^?]+)|tco:(?P<eid>[^?]+)'
+    _VALID_URL = r'https?://t\.co/(?P<id>[^?#]+)|tco:(?P<eid>[^?#]+)'
     _BASE_URL = 'https://t.co/'

     def _real_extract(self, url):
yt_dlp/extractor/unsupported.py

@@ -164,11 +164,15 @@ class KnownPiracyIE(UnsupportedInfoExtractor):
        r'viewsb\.com',
        r'filemoon\.sx',
        r'hentai\.animestigma\.com',
+        r'thisav\.com',
    )

    _TESTS = [{
        'url': 'http://dood.to/e/5s1wmbdacezb',
        'only_matching': True,
+    }, {
+        'url': 'https://thisav.com/en/terms',
+        'only_matching': True,
    }]

    def _real_extract(self, url):
Some files were not shown because too many files have changed in this diff.