mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2026-01-19 13:21:16 +00:00
Compare commits
112 Commits
2025.08.11
...
2025.09.26
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
88e2a2de8e | ||
|
|
12b57d2858 | ||
|
|
b7b7910d96 | ||
|
|
50e452fd7d | ||
|
|
94c5622be9 | ||
|
|
7df5acc546 | ||
|
|
4429fd0450 | ||
|
|
2e81e298cd | ||
|
|
7f5d9f8543 | ||
|
|
f8750504c2 | ||
|
|
8821682f15 | ||
|
|
08d7899683 | ||
|
|
98b6b0d339 | ||
|
|
bf5d18016b | ||
|
|
4bc19adc87 | ||
|
|
b2c01d0498 | ||
|
|
e123a48f11 | ||
|
|
820c6e2445 | ||
|
|
677997d84e | ||
|
|
b81e9272dc | ||
|
|
df4b4e8ccf | ||
|
|
f3829463c7 | ||
|
|
ae3923b6b2 | ||
|
|
8ab262c66b | ||
|
|
e2d37bcc8e | ||
|
|
eb4b3a5fc7 | ||
|
|
65e90aea29 | ||
|
|
17bfaa53ed | ||
|
|
8cb037c0b0 | ||
|
|
7d9e48b22a | ||
|
|
f5cb721185 | ||
|
|
83b8409366 | ||
|
|
ba80446855 | ||
|
|
22ea0688ed | ||
|
|
5c1abcdc49 | ||
|
|
3d9a88bd8e | ||
|
|
9def9a4b0e | ||
|
|
679587dac7 | ||
|
|
a1c98226a4 | ||
|
|
c8ede5f34d | ||
|
|
a183837ec8 | ||
|
|
067062bb87 | ||
|
|
8597a4331e | ||
|
|
48a214bef4 | ||
|
|
6a763a55d8 | ||
|
|
e6e6b51214 | ||
|
|
7c9b10ebc8 | ||
|
|
cd94e70040 | ||
|
|
7c27965ff6 | ||
|
|
50136eeeb3 | ||
|
|
603acdff07 | ||
|
|
d925e92b71 | ||
|
|
ed24640943 | ||
|
|
76bb46002c | ||
|
|
1e28f6bf74 | ||
|
|
0b51005b48 | ||
|
|
223baa81f6 | ||
|
|
18fe696df9 | ||
|
|
487a90c8ef | ||
|
|
8cd37b85d4 | ||
|
|
5c7ad68ff1 | ||
|
|
1ddbd033f0 | ||
|
|
fec30c56f0 | ||
|
|
d6950c27af | ||
|
|
3bd9154412 | ||
|
|
8f4a908300 | ||
|
|
f1ba9f4ddb | ||
|
|
5c8bcfdbc6 | ||
|
|
895e762a83 | ||
|
|
39b7b8ddc7 | ||
|
|
526410b4af | ||
|
|
f29acc4a6e | ||
|
|
4dbe96459d | ||
|
|
a03c37b44e | ||
|
|
fcea3edb5c | ||
|
|
415b6d9ca8 | ||
|
|
575753b9f3 | ||
|
|
c2fc4f3e7f | ||
|
|
07247d6c20 | ||
|
|
f63a7e41d1 | ||
|
|
7b8a8abb98 | ||
|
|
a97f4cb57e | ||
|
|
d154dc3dcf | ||
|
|
438d3f06b3 | ||
|
|
74b4b3b005 | ||
|
|
36e873822b | ||
|
|
d3d1ac8eb2 | ||
|
|
86d74e5cf0 | ||
|
|
6ca9165648 | ||
|
|
82a1390204 | ||
|
|
7540aa1da1 | ||
|
|
35da8df4f8 | ||
|
|
8df121ba59 | ||
|
|
471a2b60e0 | ||
|
|
df0553153e | ||
|
|
7bc53ae799 | ||
|
|
d8200ff0a4 | ||
|
|
0f6b915822 | ||
|
|
374ea049f5 | ||
|
|
6f4c1bb593 | ||
|
|
c22660aed5 | ||
|
|
404bd889d0 | ||
|
|
edf55e8184 | ||
|
|
8a8861d538 | ||
|
|
70f5669951 | ||
|
|
6ae3543d5a | ||
|
|
770119bdd1 | ||
|
|
8e3f8065af | ||
|
|
aea85d525e | ||
|
|
f2919bd28e | ||
|
|
681ed2153d | ||
|
|
bdeb3eb3f2 |
2
.github/ISSUE_TEMPLATE/1_broken_site.yml
vendored
2
.github/ISSUE_TEMPLATE/1_broken_site.yml
vendored
@@ -24,6 +24,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar issues **including closed ones**. DO NOT post duplicates
|
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar issues **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
- type: input
|
- type: input
|
||||||
id: region
|
id: region
|
||||||
|
|||||||
@@ -24,6 +24,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar requests **including closed ones**. DO NOT post duplicates
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar requests **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and am willing to share it if required
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and am willing to share it if required
|
||||||
- type: input
|
- type: input
|
||||||
id: region
|
id: region
|
||||||
|
|||||||
@@ -22,6 +22,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar requests **including closed ones**. DO NOT post duplicates
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar requests **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
- type: input
|
- type: input
|
||||||
id: region
|
id: region
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/4_bug_report.yml
vendored
2
.github/ISSUE_TEMPLATE/4_bug_report.yml
vendored
@@ -20,6 +20,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar issues **including closed ones**. DO NOT post duplicates
|
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar issues **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: description
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/5_feature_request.yml
vendored
2
.github/ISSUE_TEMPLATE/5_feature_request.yml
vendored
@@ -22,6 +22,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar requests **including closed ones**. DO NOT post duplicates
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar requests **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: description
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/6_question.yml
vendored
2
.github/ISSUE_TEMPLATE/6_question.yml
vendored
@@ -28,6 +28,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar questions **including closed ones**. DO NOT post duplicates
|
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%3Aissue%20-label%3Aspam%20%20) for similar questions **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: question
|
id: question
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
@@ -20,6 +20,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar issues **including closed ones**. DO NOT post duplicates
|
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar issues **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
- type: input
|
- type: input
|
||||||
id: region
|
id: region
|
||||||
|
|||||||
@@ -20,6 +20,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar requests **including closed ones**. DO NOT post duplicates
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar requests **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and am willing to share it if required
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and am willing to share it if required
|
||||||
- type: input
|
- type: input
|
||||||
id: region
|
id: region
|
||||||
|
|||||||
@@ -18,6 +18,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar requests **including closed ones**. DO NOT post duplicates
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar requests **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
- type: input
|
- type: input
|
||||||
id: region
|
id: region
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml
vendored
2
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml
vendored
@@ -16,6 +16,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar issues **including closed ones**. DO NOT post duplicates
|
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar issues **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: description
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
@@ -18,6 +18,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar requests **including closed ones**. DO NOT post duplicates
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar requests **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: description
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE_tmpl/6_question.yml
vendored
2
.github/ISSUE_TEMPLATE_tmpl/6_question.yml
vendored
@@ -24,6 +24,8 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar questions **including closed ones**. DO NOT post duplicates
|
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766), [the FAQ](https://github.com/yt-dlp/yt-dlp/wiki/FAQ), and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=is%%3Aissue%%20-label%%3Aspam%%20%%20) for similar questions **including closed ones**. DO NOT post duplicates
|
||||||
required: true
|
required: true
|
||||||
|
- label: I've read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: question
|
id: question
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -33,6 +33,7 @@ Fixes #
|
|||||||
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check those that apply and remove the others:
|
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check those that apply and remove the others:
|
||||||
- [ ] I am the original author of the code in this PR, and I am willing to release it under [Unlicense](http://unlicense.org/)
|
- [ ] I am the original author of the code in this PR, and I am willing to release it under [Unlicense](http://unlicense.org/)
|
||||||
- [ ] I am not the original author of the code in this PR, but it is in the public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
|
- [ ] I am not the original author of the code in this PR, but it is in the public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
|
||||||
|
- [ ] I have read the [policy against AI/LLM contributions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#automated-contributions-ai--llm-policy) and understand I may be blocked from the repository if it is violated
|
||||||
|
|
||||||
### What is the purpose of your *pull request*? Check those that apply and remove the others:
|
### What is the purpose of your *pull request*? Check those that apply and remove the others:
|
||||||
- [ ] Fix or improvement to an extractor (Make sure to add/update tests)
|
- [ ] Fix or improvement to an extractor (Make sure to add/update tests)
|
||||||
|
|||||||
28
.github/actionlint.yml
vendored
Normal file
28
.github/actionlint.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
self-hosted-runner:
|
||||||
|
labels:
|
||||||
|
# Workaround for the outdated runner list in actionlint v1.7.7
|
||||||
|
# Ref: https://github.com/rhysd/actionlint/issues/533
|
||||||
|
- windows-11-arm
|
||||||
|
|
||||||
|
config-variables:
|
||||||
|
- KEEP_CACHE_WARM
|
||||||
|
- PUSH_VERSION_COMMIT
|
||||||
|
- UPDATE_TO_VERIFICATION
|
||||||
|
- PYPI_PROJECT
|
||||||
|
- PYPI_SUFFIX
|
||||||
|
- NIGHTLY_PYPI_PROJECT
|
||||||
|
- NIGHTLY_PYPI_SUFFIX
|
||||||
|
- NIGHTLY_ARCHIVE_REPO
|
||||||
|
- BUILD_NIGHTLY
|
||||||
|
- MASTER_PYPI_PROJECT
|
||||||
|
- MASTER_PYPI_SUFFIX
|
||||||
|
- MASTER_ARCHIVE_REPO
|
||||||
|
- BUILD_MASTER
|
||||||
|
- ISSUE_LOCKDOWN
|
||||||
|
- SANITIZE_COMMENT
|
||||||
|
|
||||||
|
paths:
|
||||||
|
.github/workflows/build.yml:
|
||||||
|
ignore:
|
||||||
|
# SC1090 "Can't follow non-constant source": ignore when using `source` to activate venv
|
||||||
|
- '.+SC1090.+'
|
||||||
584
.github/workflows/build.yml
vendored
584
.github/workflows/build.yml
vendored
@@ -9,31 +9,27 @@ on:
|
|||||||
required: false
|
required: false
|
||||||
default: stable
|
default: stable
|
||||||
type: string
|
type: string
|
||||||
|
origin:
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
unix:
|
unix:
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
linux_static:
|
linux:
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
linux_arm:
|
linux_armv7l:
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
musllinux:
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
macos:
|
macos:
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
macos_legacy:
|
|
||||||
default: true
|
|
||||||
type: boolean
|
|
||||||
windows:
|
windows:
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
windows32:
|
|
||||||
default: true
|
|
||||||
type: boolean
|
|
||||||
origin:
|
|
||||||
required: false
|
|
||||||
default: ''
|
|
||||||
type: string
|
|
||||||
secrets:
|
secrets:
|
||||||
GPG_SIGNING_KEY:
|
GPG_SIGNING_KEY:
|
||||||
required: false
|
required: false
|
||||||
@@ -43,7 +39,9 @@ on:
|
|||||||
version:
|
version:
|
||||||
description: |
|
description: |
|
||||||
VERSION: yyyy.mm.dd[.rev] or rev
|
VERSION: yyyy.mm.dd[.rev] or rev
|
||||||
required: true
|
(default: auto-generated)
|
||||||
|
required: false
|
||||||
|
default: ''
|
||||||
type: string
|
type: string
|
||||||
channel:
|
channel:
|
||||||
description: |
|
description: |
|
||||||
@@ -55,37 +53,26 @@ on:
|
|||||||
description: yt-dlp, yt-dlp.tar.gz
|
description: yt-dlp, yt-dlp.tar.gz
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
linux_static:
|
linux:
|
||||||
description: yt-dlp_linux
|
description: yt-dlp_linux, yt-dlp_linux.zip, yt-dlp_linux_aarch64, yt-dlp_linux_aarch64.zip
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
linux_arm:
|
linux_armv7l:
|
||||||
description: yt-dlp_linux_aarch64, yt-dlp_linux_armv7l
|
description: yt-dlp_linux_armv7l.zip
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
musllinux:
|
||||||
|
description: yt-dlp_musllinux, yt-dlp_musllinux.zip, yt-dlp_musllinux_aarch64, yt-dlp_musllinux_aarch64.zip
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
macos:
|
macos:
|
||||||
description: yt-dlp_macos, yt-dlp_macos.zip
|
description: yt-dlp_macos, yt-dlp_macos.zip
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
macos_legacy:
|
|
||||||
description: yt-dlp_macos_legacy
|
|
||||||
default: true
|
|
||||||
type: boolean
|
|
||||||
windows:
|
windows:
|
||||||
description: yt-dlp.exe, yt-dlp_win.zip
|
description: yt-dlp.exe, yt-dlp_win.zip, yt-dlp_x86.exe, yt-dlp_win_x86.zip, yt-dlp_arm64.exe, yt-dlp_win_arm64.zip
|
||||||
default: true
|
default: true
|
||||||
type: boolean
|
type: boolean
|
||||||
windows32:
|
|
||||||
description: yt-dlp_x86.exe
|
|
||||||
default: true
|
|
||||||
type: boolean
|
|
||||||
origin:
|
|
||||||
description: Origin
|
|
||||||
required: false
|
|
||||||
default: 'current repo'
|
|
||||||
type: choice
|
|
||||||
options:
|
|
||||||
- 'current repo'
|
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@@ -94,44 +81,151 @@ jobs:
|
|||||||
process:
|
process:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
outputs:
|
outputs:
|
||||||
origin: ${{ steps.process_origin.outputs.origin }}
|
origin: ${{ steps.process_inputs.outputs.origin }}
|
||||||
|
timestamp: ${{ steps.process_inputs.outputs.timestamp }}
|
||||||
|
version: ${{ steps.process_inputs.outputs.version }}
|
||||||
|
linux_matrix: ${{ steps.linux_matrix.outputs.matrix }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Process origin
|
- name: Process inputs
|
||||||
id: process_origin
|
id: process_inputs
|
||||||
|
env:
|
||||||
|
INPUTS: ${{ toJSON(inputs) }}
|
||||||
|
REPOSITORY: ${{ github.repository }}
|
||||||
|
shell: python
|
||||||
run: |
|
run: |
|
||||||
echo "origin=${{ inputs.origin == 'current repo' && github.repository || inputs.origin }}" | tee "$GITHUB_OUTPUT"
|
import datetime as dt
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
INPUTS = json.loads(os.environ['INPUTS'])
|
||||||
|
timestamp = dt.datetime.now(tz=dt.timezone.utc).strftime('%Y.%m.%d.%H%M%S.%f')
|
||||||
|
version = INPUTS.get('version')
|
||||||
|
if version and '.' not in version:
|
||||||
|
# build.yml was dispatched with only a revision as the version input value
|
||||||
|
version_parts = [*timestamp.split('.')[:3], version]
|
||||||
|
elif not version:
|
||||||
|
# build.yml was dispatched without any version input value, so include .HHMMSS revision
|
||||||
|
version_parts = timestamp.split('.')[:4]
|
||||||
|
else:
|
||||||
|
# build.yml was called or dispatched with a complete version input value
|
||||||
|
version_parts = version.split('.')
|
||||||
|
assert all(re.fullmatch(r'[0-9]+', part) for part in version_parts), 'Version must be numeric'
|
||||||
|
outputs = {
|
||||||
|
'origin': INPUTS.get('origin') or os.environ['REPOSITORY'],
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'version': '.'.join(version_parts),
|
||||||
|
}
|
||||||
|
print(json.dumps(outputs, indent=2))
|
||||||
|
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
|
||||||
|
f.write('\n'.join(f'{key}={value}' for key, value in outputs.items()))
|
||||||
|
|
||||||
|
- name: Build Linux matrix
|
||||||
|
id: linux_matrix
|
||||||
|
env:
|
||||||
|
INPUTS: ${{ toJSON(inputs) }}
|
||||||
|
PYTHON_VERSION: '3.13'
|
||||||
|
UPDATE_TO: yt-dlp/yt-dlp@2025.09.05
|
||||||
|
shell: python
|
||||||
|
run: |
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
EXE_MAP = {
|
||||||
|
'linux': [{
|
||||||
|
'os': 'linux',
|
||||||
|
'arch': 'x86_64',
|
||||||
|
'runner': 'ubuntu-24.04',
|
||||||
|
}, {
|
||||||
|
'os': 'linux',
|
||||||
|
'arch': 'aarch64',
|
||||||
|
'runner': 'ubuntu-24.04-arm',
|
||||||
|
}],
|
||||||
|
'linux_armv7l': [{
|
||||||
|
'os': 'linux',
|
||||||
|
'arch': 'armv7l',
|
||||||
|
'runner': 'ubuntu-24.04-arm',
|
||||||
|
'qemu_platform': 'linux/arm/v7',
|
||||||
|
'onefile': False,
|
||||||
|
'cache_requirements': True,
|
||||||
|
'update_to': 'yt-dlp/yt-dlp@2023.03.04',
|
||||||
|
}],
|
||||||
|
'musllinux': [{
|
||||||
|
'os': 'musllinux',
|
||||||
|
'arch': 'x86_64',
|
||||||
|
'runner': 'ubuntu-24.04',
|
||||||
|
}, {
|
||||||
|
'os': 'musllinux',
|
||||||
|
'arch': 'aarch64',
|
||||||
|
'runner': 'ubuntu-24.04-arm',
|
||||||
|
}],
|
||||||
|
}
|
||||||
|
INPUTS = json.loads(os.environ['INPUTS'])
|
||||||
|
matrix = [exe for key, group in EXE_MAP.items() for exe in group if INPUTS.get(key)]
|
||||||
|
if not matrix:
|
||||||
|
# If we send an empty matrix when no linux inputs are given, the entire workflow fails
|
||||||
|
matrix = [EXE_MAP['linux'][0]]
|
||||||
|
for exe in matrix:
|
||||||
|
exe['exe'] = '_'.join(filter(None, (
|
||||||
|
'yt-dlp',
|
||||||
|
exe['os'],
|
||||||
|
exe['arch'] != 'x86_64' and exe['arch'],
|
||||||
|
)))
|
||||||
|
exe.setdefault('qemu_platform', None)
|
||||||
|
exe.setdefault('onefile', True)
|
||||||
|
exe.setdefault('onedir', True)
|
||||||
|
exe.setdefault('cache_requirements', False)
|
||||||
|
exe.setdefault('python_version', os.environ['PYTHON_VERSION'])
|
||||||
|
exe.setdefault('update_to', os.environ['UPDATE_TO'])
|
||||||
|
if not any(INPUTS.get(key) for key in EXE_MAP):
|
||||||
|
print('skipping linux job')
|
||||||
|
else:
|
||||||
|
print(json.dumps(matrix, indent=2))
|
||||||
|
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
|
||||||
|
f.write(f'matrix={json.dumps(matrix)}')
|
||||||
|
|
||||||
unix:
|
unix:
|
||||||
needs: process
|
needs: process
|
||||||
if: inputs.unix
|
if: inputs.unix
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
CHANNEL: ${{ inputs.channel }}
|
||||||
|
ORIGIN: ${{ needs.process.outputs.origin }}
|
||||||
|
VERSION: ${{ needs.process.outputs.version }}
|
||||||
|
UPDATE_TO: yt-dlp/yt-dlp@2025.09.05
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0 # Needed for changelog
|
fetch-depth: 0 # Needed for changelog
|
||||||
- uses: actions/setup-python@v5
|
|
||||||
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: "3.10"
|
||||||
|
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: |
|
run: |
|
||||||
sudo apt -y install zip pandoc man sed
|
sudo apt -y install zip pandoc man sed
|
||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
python devscripts/update-version.py -c "${CHANNEL}" -r "${ORIGIN}" "${VERSION}"
|
||||||
python devscripts/update_changelog.py -vv
|
python devscripts/update_changelog.py -vv
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
|
|
||||||
- name: Build Unix platform-independent binary
|
- name: Build Unix platform-independent binary
|
||||||
run: |
|
run: |
|
||||||
make all tar
|
make all tar
|
||||||
|
|
||||||
- name: Verify --update-to
|
- name: Verify --update-to
|
||||||
if: vars.UPDATE_TO_VERIFICATION
|
if: vars.UPDATE_TO_VERIFICATION
|
||||||
run: |
|
run: |
|
||||||
chmod +x ./yt-dlp
|
chmod +x ./yt-dlp
|
||||||
cp ./yt-dlp ./yt-dlp_downgraded
|
cp ./yt-dlp ./yt-dlp_downgraded
|
||||||
version="$(./yt-dlp --version)"
|
version="$(./yt-dlp --version)"
|
||||||
./yt-dlp_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
|
./yt-dlp_downgraded -v --update-to "${UPDATE_TO}"
|
||||||
downgraded_version="$(./yt-dlp_downgraded --version)"
|
downgraded_version="$(./yt-dlp_downgraded --version)"
|
||||||
[[ "$version" != "$downgraded_version" ]]
|
[[ "${version}" != "${downgraded_version}" ]]
|
||||||
|
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
@@ -141,99 +235,74 @@ jobs:
|
|||||||
yt-dlp.tar.gz
|
yt-dlp.tar.gz
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
|
||||||
linux_static:
|
linux:
|
||||||
|
name: ${{ matrix.os }} (${{ matrix.arch }})
|
||||||
|
if: inputs.linux || inputs.linux_armv7l || inputs.musllinux
|
||||||
needs: process
|
needs: process
|
||||||
if: inputs.linux_static
|
runs-on: ${{ matrix.runner }}
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- name: Build static executable
|
|
||||||
env:
|
|
||||||
channel: ${{ inputs.channel }}
|
|
||||||
origin: ${{ needs.process.outputs.origin }}
|
|
||||||
version: ${{ inputs.version }}
|
|
||||||
run: |
|
|
||||||
mkdir ~/build
|
|
||||||
cd bundle/docker
|
|
||||||
docker compose up --build static
|
|
||||||
sudo chown "${USER}:docker" ~/build/yt-dlp_linux
|
|
||||||
- name: Verify --update-to
|
|
||||||
if: vars.UPDATE_TO_VERIFICATION
|
|
||||||
run: |
|
|
||||||
chmod +x ~/build/yt-dlp_linux
|
|
||||||
cp ~/build/yt-dlp_linux ~/build/yt-dlp_linux_downgraded
|
|
||||||
version="$(~/build/yt-dlp_linux --version)"
|
|
||||||
~/build/yt-dlp_linux_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
|
|
||||||
downgraded_version="$(~/build/yt-dlp_linux_downgraded --version)"
|
|
||||||
[[ "$version" != "$downgraded_version" ]]
|
|
||||||
- name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: build-bin-${{ github.job }}
|
|
||||||
path: |
|
|
||||||
~/build/yt-dlp_linux
|
|
||||||
compression-level: 0
|
|
||||||
|
|
||||||
linux_arm:
|
|
||||||
needs: process
|
|
||||||
if: inputs.linux_arm
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write # for creating cache
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
architecture:
|
include: ${{ fromJSON(needs.process.outputs.linux_matrix) }}
|
||||||
- armv7
|
env:
|
||||||
- aarch64
|
CHANNEL: ${{ inputs.channel }}
|
||||||
|
ORIGIN: ${{ needs.process.outputs.origin }}
|
||||||
|
VERSION: ${{ needs.process.outputs.version }}
|
||||||
|
EXE_NAME: ${{ matrix.exe }}
|
||||||
|
PYTHON_VERSION: ${{ matrix.python_version }}
|
||||||
|
UPDATE_TO: ${{ (vars.UPDATE_TO_VERIFICATION && matrix.update_to) || '' }}
|
||||||
|
SKIP_ONEDIR_BUILD: ${{ (!matrix.onedir && '1') || '' }}
|
||||||
|
SKIP_ONEFILE_BUILD: ${{ (!matrix.onefile && '1') || '' }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
|
||||||
path: ./repo
|
|
||||||
- name: Virtualized Install, Prepare & Build
|
|
||||||
uses: yt-dlp/run-on-arch-action@v3
|
|
||||||
with:
|
|
||||||
# Ref: https://github.com/uraimo/run-on-arch-action/issues/55
|
|
||||||
env: |
|
|
||||||
GITHUB_WORKFLOW: build
|
|
||||||
githubToken: ${{ github.token }} # To cache image
|
|
||||||
arch: ${{ matrix.architecture }}
|
|
||||||
distro: ubuntu20.04 # Standalone executable should be built on minimum supported OS
|
|
||||||
dockerRunArgs: --volume "${PWD}/repo:/repo"
|
|
||||||
install: | # Installing Python 3.10 from the Deadsnakes repo raises errors
|
|
||||||
apt update
|
|
||||||
apt -y install zlib1g-dev libffi-dev python3.9 python3.9-dev python3.9-distutils python3-pip \
|
|
||||||
python3-secretstorage # Cannot build cryptography wheel in virtual armv7 environment
|
|
||||||
python3.9 -m pip install -U pip wheel 'setuptools>=71.0.2'
|
|
||||||
# XXX: Keep this in sync with pyproject.toml (it can't be accessed at this stage) and exclude secretstorage
|
|
||||||
python3.9 -m pip install -U Pyinstaller mutagen pycryptodomex brotli certifi cffi \
|
|
||||||
'requests>=2.32.2,<3' 'urllib3>=2.0.2,<3' 'websockets>=13.0'
|
|
||||||
|
|
||||||
run: |
|
- name: Cache requirements
|
||||||
cd repo
|
if: matrix.cache_requirements
|
||||||
python3.9 devscripts/install_deps.py -o --include build
|
id: cache-venv
|
||||||
python3.9 devscripts/install_deps.py --include pyinstaller # Cached versions may be out of date
|
uses: actions/cache@v4
|
||||||
python3.9 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
env:
|
||||||
python3.9 devscripts/make_lazy_extractors.py
|
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1
|
||||||
python3.9 -m bundle.pyinstaller
|
with:
|
||||||
|
path: |
|
||||||
|
venv
|
||||||
|
key: cache-reqs-${{ matrix.os }}_${{ matrix.arch }}-${{ github.ref }}-${{ needs.process.outputs.timestamp }}
|
||||||
|
restore-keys: |
|
||||||
|
cache-reqs-${{ matrix.os }}_${{ matrix.arch }}-${{ github.ref }}-
|
||||||
|
cache-reqs-${{ matrix.os }}_${{ matrix.arch }}-
|
||||||
|
|
||||||
if ${{ vars.UPDATE_TO_VERIFICATION && 'true' || 'false' }}; then
|
- name: Set up QEMU
|
||||||
arch="${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}"
|
if: matrix.qemu_platform
|
||||||
chmod +x ./dist/yt-dlp_linux_${arch}
|
uses: docker/setup-qemu-action@v3
|
||||||
cp ./dist/yt-dlp_linux_${arch} ./dist/yt-dlp_linux_${arch}_downgraded
|
with:
|
||||||
version="$(./dist/yt-dlp_linux_${arch} --version)"
|
platforms: ${{ matrix.qemu_platform }}
|
||||||
./dist/yt-dlp_linux_${arch}_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
|
|
||||||
downgraded_version="$(./dist/yt-dlp_linux_${arch}_downgraded --version)"
|
- name: Build executable
|
||||||
[[ "$version" != "$downgraded_version" ]]
|
env:
|
||||||
fi
|
SERVICE: ${{ matrix.os }}_${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
mkdir -p ./venv
|
||||||
|
mkdir -p ./dist
|
||||||
|
pushd bundle/docker
|
||||||
|
docker compose up --build --exit-code-from "${SERVICE}" "${SERVICE}"
|
||||||
|
popd
|
||||||
|
if [[ -z "${SKIP_ONEFILE_BUILD}" ]]; then
|
||||||
|
sudo chown "${USER}:docker" "./dist/${EXE_NAME}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Verify executable in container
|
||||||
|
env:
|
||||||
|
SERVICE: ${{ matrix.os }}_${{ matrix.arch }}_verify
|
||||||
|
run: |
|
||||||
|
cd bundle/docker
|
||||||
|
docker compose up --build --exit-code-from "${SERVICE}" "${SERVICE}"
|
||||||
|
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-bin-linux_${{ matrix.architecture }}
|
name: build-bin-${{ matrix.os }}_${{ matrix.arch }}
|
||||||
path: | # run-on-arch-action designates armv7l as armv7
|
path: |
|
||||||
repo/dist/yt-dlp_linux_${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}
|
dist/${{ matrix.exe }}*
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
|
||||||
macos:
|
macos:
|
||||||
@@ -241,22 +310,29 @@ jobs:
|
|||||||
if: inputs.macos
|
if: inputs.macos
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
actions: write # For cleaning up cache
|
|
||||||
runs-on: macos-14
|
runs-on: macos-14
|
||||||
|
env:
|
||||||
|
CHANNEL: ${{ inputs.channel }}
|
||||||
|
ORIGIN: ${{ needs.process.outputs.origin }}
|
||||||
|
VERSION: ${{ needs.process.outputs.version }}
|
||||||
|
UPDATE_TO: yt-dlp/yt-dlp@2025.09.05
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
# NB: Building universal2 does not work with python from actions/setup-python
|
# NB: Building universal2 does not work with python from actions/setup-python
|
||||||
|
|
||||||
- name: Restore cached requirements
|
- name: Cache requirements
|
||||||
id: restore-cache
|
id: cache-venv
|
||||||
uses: actions/cache/restore@v4
|
uses: actions/cache@v4
|
||||||
env:
|
env:
|
||||||
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1
|
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
~/yt-dlp-build-venv
|
~/yt-dlp-build-venv
|
||||||
key: cache-reqs-${{ github.job }}-${{ github.ref }}
|
key: cache-reqs-${{ github.job }}-${{ github.ref }}-${{ needs.process.outputs.timestamp }}
|
||||||
|
restore-keys: |
|
||||||
|
cache-reqs-${{ github.job }}-${{ github.ref }}-
|
||||||
|
cache-reqs-${{ github.job }}-
|
||||||
|
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: |
|
run: |
|
||||||
@@ -301,7 +377,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
python3 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
python3 devscripts/update-version.py -c "${CHANNEL}" -r "${ORIGIN}" "${VERSION}"
|
||||||
python3 devscripts/make_lazy_extractors.py
|
python3 devscripts/make_lazy_extractors.py
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
@@ -316,7 +392,7 @@ jobs:
|
|||||||
chmod +x ./dist/yt-dlp_macos
|
chmod +x ./dist/yt-dlp_macos
|
||||||
cp ./dist/yt-dlp_macos ./dist/yt-dlp_macos_downgraded
|
cp ./dist/yt-dlp_macos ./dist/yt-dlp_macos_downgraded
|
||||||
version="$(./dist/yt-dlp_macos --version)"
|
version="$(./dist/yt-dlp_macos --version)"
|
||||||
./dist/yt-dlp_macos_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
|
./dist/yt-dlp_macos_downgraded -v --update-to "${UPDATE_TO}"
|
||||||
downgraded_version="$(./dist/yt-dlp_macos_downgraded --version)"
|
downgraded_version="$(./dist/yt-dlp_macos_downgraded --version)"
|
||||||
[[ "$version" != "$downgraded_version" ]]
|
[[ "$version" != "$downgraded_version" ]]
|
||||||
|
|
||||||
@@ -329,165 +405,124 @@ jobs:
|
|||||||
dist/yt-dlp_macos.zip
|
dist/yt-dlp_macos.zip
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
|
||||||
- name: Cleanup cache
|
|
||||||
if: steps.restore-cache.outputs.cache-hit == 'true'
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
cache_key: cache-reqs-${{ github.job }}-${{ github.ref }}
|
|
||||||
run: |
|
|
||||||
gh cache delete "${cache_key}"
|
|
||||||
|
|
||||||
- name: Cache requirements
|
|
||||||
uses: actions/cache/save@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/yt-dlp-build-venv
|
|
||||||
key: cache-reqs-${{ github.job }}-${{ github.ref }}
|
|
||||||
|
|
||||||
macos_legacy:
|
|
||||||
needs: process
|
|
||||||
if: inputs.macos_legacy
|
|
||||||
runs-on: macos-13
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- name: Install Python
|
|
||||||
# We need the official Python, because the GA ones only support newer macOS versions
|
|
||||||
env:
|
|
||||||
PYTHON_VERSION: 3.10.5
|
|
||||||
MACOSX_DEPLOYMENT_TARGET: 10.9 # Used up by the Python build tools
|
|
||||||
run: |
|
|
||||||
# Hack to get the latest patch version. Uncomment if needed
|
|
||||||
#brew install python@3.10
|
|
||||||
#export PYTHON_VERSION=$( $(brew --prefix)/opt/python@3.10/bin/python3 --version | cut -d ' ' -f 2 )
|
|
||||||
curl "https://www.python.org/ftp/python/${PYTHON_VERSION}/python-${PYTHON_VERSION}-macos11.pkg" -o "python.pkg"
|
|
||||||
sudo installer -pkg python.pkg -target /
|
|
||||||
python3 --version
|
|
||||||
- name: Install Requirements
|
|
||||||
run: |
|
|
||||||
brew install coreutils
|
|
||||||
python3 devscripts/install_deps.py --user -o --include build
|
|
||||||
python3 devscripts/install_deps.py --user --include pyinstaller
|
|
||||||
|
|
||||||
- name: Prepare
|
|
||||||
run: |
|
|
||||||
python3 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
|
||||||
python3 devscripts/make_lazy_extractors.py
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
python3 -m bundle.pyinstaller
|
|
||||||
mv dist/yt-dlp_macos dist/yt-dlp_macos_legacy
|
|
||||||
|
|
||||||
- name: Verify --update-to
|
|
||||||
if: vars.UPDATE_TO_VERIFICATION
|
|
||||||
run: |
|
|
||||||
chmod +x ./dist/yt-dlp_macos_legacy
|
|
||||||
cp ./dist/yt-dlp_macos_legacy ./dist/yt-dlp_macos_legacy_downgraded
|
|
||||||
version="$(./dist/yt-dlp_macos_legacy --version)"
|
|
||||||
./dist/yt-dlp_macos_legacy_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
|
|
||||||
downgraded_version="$(./dist/yt-dlp_macos_legacy_downgraded --version)"
|
|
||||||
[[ "$version" != "$downgraded_version" ]]
|
|
||||||
|
|
||||||
- name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: build-bin-${{ github.job }}
|
|
||||||
path: |
|
|
||||||
dist/yt-dlp_macos_legacy
|
|
||||||
compression-level: 0
|
|
||||||
|
|
||||||
windows:
|
windows:
|
||||||
|
name: windows (${{ matrix.arch }})
|
||||||
needs: process
|
needs: process
|
||||||
if: inputs.windows
|
if: inputs.windows
|
||||||
runs-on: windows-latest
|
permissions:
|
||||||
|
contents: read
|
||||||
|
runs-on: ${{ matrix.runner }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- arch: 'x64'
|
||||||
|
runner: windows-2025
|
||||||
|
python_version: '3.10'
|
||||||
|
platform_tag: win_amd64
|
||||||
|
pyi_version: '6.16.0'
|
||||||
|
pyi_tag: '2025.09.13.221251'
|
||||||
|
pyi_hash: b6496c7630c3afe66900cfa824e8234a8c2e2c81704bd7facd79586abc76c0e5
|
||||||
|
- arch: 'x86'
|
||||||
|
runner: windows-2025
|
||||||
|
python_version: '3.10'
|
||||||
|
platform_tag: win32
|
||||||
|
pyi_version: '6.16.0'
|
||||||
|
pyi_tag: '2025.09.13.221251'
|
||||||
|
pyi_hash: 2d881843580efdc54f3523507fc6d9c5b6051ee49c743a6d9b7003ac5758c226
|
||||||
|
- arch: 'arm64'
|
||||||
|
runner: windows-11-arm
|
||||||
|
python_version: '3.13' # arm64 only has Python >= 3.11 available
|
||||||
|
platform_tag: win_arm64
|
||||||
|
pyi_version: '6.16.0'
|
||||||
|
pyi_tag: '2025.09.13.221251'
|
||||||
|
pyi_hash: 4250c9085e34a95c898f3ee2f764914fc36ec59f0d97c28e6a75fcf21f7b144f
|
||||||
|
env:
|
||||||
|
CHANNEL: ${{ inputs.channel }}
|
||||||
|
ORIGIN: ${{ needs.process.outputs.origin }}
|
||||||
|
VERSION: ${{ needs.process.outputs.version }}
|
||||||
|
SUFFIX: ${{ (matrix.arch != 'x64' && format('_{0}', matrix.arch)) || '' }}
|
||||||
|
UPDATE_TO: yt-dlp/yt-dlp@2025.09.05
|
||||||
|
BASE_CACHE_KEY: cache-reqs-${{ github.job }}_${{ matrix.arch }}-${{ matrix.python_version }}
|
||||||
|
PYI_REPO: https://github.com/yt-dlp/Pyinstaller-Builds
|
||||||
|
PYI_WHEEL: pyinstaller-${{ matrix.pyi_version }}-py3-none-${{ matrix.platform_tag }}.whl
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: ${{ matrix.python_version }}
|
||||||
|
architecture: ${{ matrix.arch }}
|
||||||
|
|
||||||
|
- name: Cache requirements
|
||||||
|
id: cache-venv
|
||||||
|
if: matrix.arch == 'arm64'
|
||||||
|
uses: actions/cache@v4
|
||||||
|
env:
|
||||||
|
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/yt-dlp-build-venv
|
||||||
|
key: ${{ env.BASE_CACHE_KEY }}-${{ github.ref }}-${{ needs.process.outputs.timestamp }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ env.BASE_CACHE_KEY }}-${{ github.ref }}-
|
||||||
|
${{ env.BASE_CACHE_KEY }}-
|
||||||
|
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: | # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
|
env:
|
||||||
|
ARCH: ${{ matrix.arch }}
|
||||||
|
PYI_URL: ${{ env.PYI_REPO }}/releases/download/${{ matrix.pyi_tag }}/${{ env.PYI_WHEEL }}
|
||||||
|
PYI_HASH: ${{ matrix.pyi_hash }}
|
||||||
|
shell: pwsh
|
||||||
|
run: |
|
||||||
|
python -m venv /yt-dlp-build-venv
|
||||||
|
/yt-dlp-build-venv/Scripts/Activate.ps1
|
||||||
|
python -m pip install -U pip
|
||||||
|
# Install custom PyInstaller build and verify hash
|
||||||
|
mkdir /pyi-wheels
|
||||||
|
python -m pip download -d /pyi-wheels --no-deps --require-hashes "pyinstaller@${Env:PYI_URL}#sha256=${Env:PYI_HASH}"
|
||||||
|
python -m pip install --force-reinstall -U "/pyi-wheels/${Env:PYI_WHEEL}"
|
||||||
python devscripts/install_deps.py -o --include build
|
python devscripts/install_deps.py -o --include build
|
||||||
python devscripts/install_deps.py --include curl-cffi
|
if ("${Env:ARCH}" -eq "x86") {
|
||||||
python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-6.13.0-py3-none-any.whl"
|
python devscripts/install_deps.py
|
||||||
|
} else {
|
||||||
|
python devscripts/install_deps.py --include curl-cffi
|
||||||
|
}
|
||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
|
shell: pwsh
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
python devscripts/update-version.py -c "${Env:CHANNEL}" -r "${Env:ORIGIN}" "${Env:VERSION}"
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
|
shell: pwsh
|
||||||
run: |
|
run: |
|
||||||
|
/yt-dlp-build-venv/Scripts/Activate.ps1
|
||||||
python -m bundle.pyinstaller
|
python -m bundle.pyinstaller
|
||||||
python -m bundle.pyinstaller --onedir
|
python -m bundle.pyinstaller --onedir
|
||||||
Compress-Archive -Path ./dist/yt-dlp/* -DestinationPath ./dist/yt-dlp_win.zip
|
Compress-Archive -Path ./dist/yt-dlp${Env:SUFFIX}/* -DestinationPath ./dist/yt-dlp_win${Env:SUFFIX}.zip
|
||||||
|
|
||||||
- name: Verify --update-to
|
- name: Verify --update-to
|
||||||
if: vars.UPDATE_TO_VERIFICATION
|
if: vars.UPDATE_TO_VERIFICATION
|
||||||
|
shell: pwsh
|
||||||
run: |
|
run: |
|
||||||
foreach ($name in @("yt-dlp")) {
|
$name = "yt-dlp${Env:SUFFIX}"
|
||||||
Copy-Item "./dist/${name}.exe" "./dist/${name}_downgraded.exe"
|
Copy-Item "./dist/${name}.exe" "./dist/${name}_downgraded.exe"
|
||||||
$version = & "./dist/${name}.exe" --version
|
$version = & "./dist/${name}.exe" --version
|
||||||
& "./dist/${name}_downgraded.exe" -v --update-to yt-dlp/yt-dlp@2023.03.04
|
& "./dist/${name}_downgraded.exe" -v --update-to "${Env:UPDATE_TO}"
|
||||||
$downgraded_version = & "./dist/${name}_downgraded.exe" --version
|
$downgraded_version = & "./dist/${name}_downgraded.exe" --version
|
||||||
if ($version -eq $downgraded_version) {
|
if ($version -eq $downgraded_version) {
|
||||||
exit 1
|
exit 1
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-bin-${{ github.job }}
|
name: build-bin-${{ github.job }}-${{ matrix.arch }}
|
||||||
path: |
|
path: |
|
||||||
dist/yt-dlp.exe
|
dist/yt-dlp${{ env.SUFFIX }}.exe
|
||||||
dist/yt-dlp_win.zip
|
dist/yt-dlp_win${{ env.SUFFIX }}.zip
|
||||||
compression-level: 0
|
|
||||||
|
|
||||||
windows32:
|
|
||||||
needs: process
|
|
||||||
if: inputs.windows32
|
|
||||||
runs-on: windows-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: "3.10"
|
|
||||||
architecture: "x86"
|
|
||||||
- name: Install Requirements
|
|
||||||
run: |
|
|
||||||
python devscripts/install_deps.py -o --include build
|
|
||||||
python devscripts/install_deps.py
|
|
||||||
python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-6.13.0-py3-none-any.whl"
|
|
||||||
|
|
||||||
- name: Prepare
|
|
||||||
run: |
|
|
||||||
python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
|
||||||
python devscripts/make_lazy_extractors.py
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
python -m bundle.pyinstaller
|
|
||||||
|
|
||||||
- name: Verify --update-to
|
|
||||||
if: vars.UPDATE_TO_VERIFICATION
|
|
||||||
run: |
|
|
||||||
foreach ($name in @("yt-dlp_x86")) {
|
|
||||||
Copy-Item "./dist/${name}.exe" "./dist/${name}_downgraded.exe"
|
|
||||||
$version = & "./dist/${name}.exe" --version
|
|
||||||
& "./dist/${name}_downgraded.exe" -v --update-to yt-dlp/yt-dlp@2023.03.04
|
|
||||||
$downgraded_version = & "./dist/${name}_downgraded.exe" --version
|
|
||||||
if ($version -eq $downgraded_version) {
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
- name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: build-bin-${{ github.job }}
|
|
||||||
path: |
|
|
||||||
dist/yt-dlp_x86.exe
|
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
|
||||||
meta_files:
|
meta_files:
|
||||||
@@ -495,12 +530,9 @@ jobs:
|
|||||||
needs:
|
needs:
|
||||||
- process
|
- process
|
||||||
- unix
|
- unix
|
||||||
- linux_static
|
- linux
|
||||||
- linux_arm
|
|
||||||
- macos
|
- macos
|
||||||
- macos_legacy
|
|
||||||
- windows
|
- windows
|
||||||
- windows32
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Download artifacts
|
- name: Download artifacts
|
||||||
@@ -529,34 +561,38 @@ jobs:
|
|||||||
lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
||||||
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lock 2024.10.22 py2exe .+
|
lock 2024.10.22 py2exe .+
|
||||||
lock 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lock 2024.10.22 zip Python 3\.8
|
||||||
lock 2024.10.22 (?!\w+_exe).+ Python 3\.8
|
|
||||||
lock 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lock 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lock 2025.08.11 darwin_legacy_exe .+
|
||||||
|
lock 2025.08.27 linux_armv7l_exe .+
|
||||||
lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
|
lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
|
||||||
lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
||||||
lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 py2exe .+
|
lockV2 yt-dlp/yt-dlp 2024.10.22 py2exe .+
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lockV2 yt-dlp/yt-dlp 2024.10.22 zip Python 3\.8
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 (?!\w+_exe).+ Python 3\.8
|
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lockV2 yt-dlp/yt-dlp 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lockV2 yt-dlp/yt-dlp 2025.08.11 darwin_legacy_exe .+
|
||||||
|
lockV2 yt-dlp/yt-dlp 2025.08.27 linux_armv7l_exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 py2exe .+
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 py2exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 zip Python 3\.8
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 (?!\w+_exe).+ Python 3\.8
|
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2025.08.12.233030 darwin_legacy_exe .+
|
||||||
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2025.08.30.232839 linux_armv7l_exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
|
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.045052 py2exe .+
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.045052 py2exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 zip Python 3\.8
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 (?!\w+_exe).+ Python 3\.8
|
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lockV2 yt-dlp/yt-dlp-master-builds 2025.08.12.232447 darwin_legacy_exe .+
|
||||||
|
lockV2 yt-dlp/yt-dlp-master-builds 2025.09.05.212910 linux_armv7l_exe .+
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
- name: Sign checksum files
|
- name: Sign checksum files
|
||||||
env:
|
env:
|
||||||
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
||||||
if: env.GPG_SIGNING_KEY != ''
|
if: env.GPG_SIGNING_KEY
|
||||||
run: |
|
run: |
|
||||||
gpg --batch --import <<< "${{ secrets.GPG_SIGNING_KEY }}"
|
gpg --batch --import <<< "${{ secrets.GPG_SIGNING_KEY }}"
|
||||||
for signfile in ./SHA*SUMS; do
|
for signfile in ./SHA*SUMS; do
|
||||||
|
|||||||
23
.github/workflows/cache-warmer.yml
vendored
Normal file
23
.github/workflows/cache-warmer.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
name: Keep cache warm
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 22 1,6,11,16,21,27 * *'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
if: |
|
||||||
|
vars.KEEP_CACHE_WARM || github.event_name == 'workflow_dispatch'
|
||||||
|
uses: ./.github/workflows/build.yml
|
||||||
|
with:
|
||||||
|
version: '999999'
|
||||||
|
channel: stable
|
||||||
|
origin: ${{ github.repository }}
|
||||||
|
unix: false
|
||||||
|
linux: false
|
||||||
|
linux_armv7l: true
|
||||||
|
musllinux: false
|
||||||
|
macos: true
|
||||||
|
windows: true
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
8
.github/workflows/core.yml
vendored
8
.github/workflows/core.yml
vendored
@@ -37,23 +37,27 @@ jobs:
|
|||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-latest]
|
os: [ubuntu-latest]
|
||||||
# CPython 3.9 is in quick-test
|
# CPython 3.9 is in quick-test
|
||||||
python-version: ['3.10', '3.11', '3.12', '3.13', pypy-3.11]
|
python-version: ['3.10', '3.11', '3.12', '3.13', '3.14-dev', pypy-3.11]
|
||||||
include:
|
include:
|
||||||
# atleast one of each CPython/PyPy tests must be in windows
|
# atleast one of each CPython/PyPy tests must be in windows
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.9'
|
python-version: '3.9'
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.10'
|
python-version: '3.10'
|
||||||
|
- os: windows-latest
|
||||||
|
python-version: '3.11'
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.12'
|
python-version: '3.12'
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.13'
|
python-version: '3.13'
|
||||||
|
- os: windows-latest
|
||||||
|
python-version: '3.14-dev'
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: pypy-3.11
|
python-version: pypy-3.11
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python-version }}
|
python-version: ${{ matrix.python-version }}
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
|
|||||||
6
.github/workflows/download.yml
vendored
6
.github/workflows/download.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: 3.9
|
python-version: 3.9
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
@@ -28,7 +28,7 @@ jobs:
|
|||||||
fail-fast: true
|
fail-fast: true
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-latest]
|
os: [ubuntu-latest]
|
||||||
python-version: ['3.10', '3.11', '3.12', '3.13', pypy-3.11]
|
python-version: ['3.10', '3.11', '3.12', '3.13', '3.14-dev', pypy-3.11]
|
||||||
include:
|
include:
|
||||||
# atleast one of each CPython/PyPy tests must be in windows
|
# atleast one of each CPython/PyPy tests must be in windows
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
@@ -38,7 +38,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python-version }}
|
python-version: ${{ matrix.python-version }}
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
|
|||||||
4
.github/workflows/quick-test.yml
vendored
4
.github/workflows/quick-test.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python 3.9
|
- name: Set up Python 3.9
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: '3.9'
|
python-version: '3.9'
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
@@ -27,7 +27,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: '3.9'
|
python-version: '3.9'
|
||||||
- name: Install dev dependencies
|
- name: Install dev dependencies
|
||||||
|
|||||||
13
.github/workflows/release-master.yml
vendored
13
.github/workflows/release-master.yml
vendored
@@ -6,10 +6,12 @@ on:
|
|||||||
paths:
|
paths:
|
||||||
- "yt_dlp/**.py"
|
- "yt_dlp/**.py"
|
||||||
- "!yt_dlp/version.py"
|
- "!yt_dlp/version.py"
|
||||||
- "bundle/*.py"
|
- "bundle/**"
|
||||||
- "pyproject.toml"
|
- "pyproject.toml"
|
||||||
- "Makefile"
|
- "Makefile"
|
||||||
- ".github/workflows/build.yml"
|
- ".github/workflows/build.yml"
|
||||||
|
- ".github/workflows/release.yml"
|
||||||
|
- ".github/workflows/release-master.yml"
|
||||||
concurrency:
|
concurrency:
|
||||||
group: release-master
|
group: release-master
|
||||||
permissions:
|
permissions:
|
||||||
@@ -17,21 +19,20 @@ permissions:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release:
|
release:
|
||||||
if: vars.BUILD_MASTER != ''
|
if: vars.BUILD_MASTER
|
||||||
uses: ./.github/workflows/release.yml
|
uses: ./.github/workflows/release.yml
|
||||||
with:
|
with:
|
||||||
prerelease: true
|
prerelease: true
|
||||||
source: master
|
source: ${{ (github.repository != 'yt-dlp/yt-dlp' && vars.MASTER_ARCHIVE_REPO) || 'master' }}
|
||||||
|
target: 'master'
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
packages: write # For package cache
|
|
||||||
actions: write # For cleaning up cache
|
|
||||||
id-token: write # mandatory for trusted publishing
|
id-token: write # mandatory for trusted publishing
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
publish_pypi:
|
publish_pypi:
|
||||||
needs: [release]
|
needs: [release]
|
||||||
if: vars.MASTER_PYPI_PROJECT != ''
|
if: vars.MASTER_PYPI_PROJECT
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
id-token: write # mandatory for trusted publishing
|
id-token: write # mandatory for trusted publishing
|
||||||
|
|||||||
13
.github/workflows/release-nightly.yml
vendored
13
.github/workflows/release-nightly.yml
vendored
@@ -7,7 +7,7 @@ permissions:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check_nightly:
|
check_nightly:
|
||||||
if: vars.BUILD_NIGHTLY != ''
|
if: vars.BUILD_NIGHTLY
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
outputs:
|
outputs:
|
||||||
commit: ${{ steps.check_for_new_commits.outputs.commit }}
|
commit: ${{ steps.check_for_new_commits.outputs.commit }}
|
||||||
@@ -22,9 +22,13 @@ jobs:
|
|||||||
"yt_dlp/*.py"
|
"yt_dlp/*.py"
|
||||||
':!yt_dlp/version.py'
|
':!yt_dlp/version.py'
|
||||||
"bundle/*.py"
|
"bundle/*.py"
|
||||||
|
"bundle/docker/compose.yml"
|
||||||
|
"bundle/docker/linux/*"
|
||||||
"pyproject.toml"
|
"pyproject.toml"
|
||||||
"Makefile"
|
"Makefile"
|
||||||
".github/workflows/build.yml"
|
".github/workflows/build.yml"
|
||||||
|
".github/workflows/release.yml"
|
||||||
|
".github/workflows/release-nightly.yml"
|
||||||
)
|
)
|
||||||
echo "commit=$(git log --format=%H -1 --since="24 hours ago" -- "${relevant_files[@]}")" | tee "$GITHUB_OUTPUT"
|
echo "commit=$(git log --format=%H -1 --since="24 hours ago" -- "${relevant_files[@]}")" | tee "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
@@ -34,17 +38,16 @@ jobs:
|
|||||||
uses: ./.github/workflows/release.yml
|
uses: ./.github/workflows/release.yml
|
||||||
with:
|
with:
|
||||||
prerelease: true
|
prerelease: true
|
||||||
source: nightly
|
source: ${{ (github.repository != 'yt-dlp/yt-dlp' && vars.NIGHTLY_ARCHIVE_REPO) || 'nightly' }}
|
||||||
|
target: 'nightly'
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
packages: write # For package cache
|
|
||||||
actions: write # For cleaning up cache
|
|
||||||
id-token: write # mandatory for trusted publishing
|
id-token: write # mandatory for trusted publishing
|
||||||
secrets: inherit
|
secrets: inherit
|
||||||
|
|
||||||
publish_pypi:
|
publish_pypi:
|
||||||
needs: [release]
|
needs: [release]
|
||||||
if: vars.NIGHTLY_PYPI_PROJECT != ''
|
if: vars.NIGHTLY_PYPI_PROJECT
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
id-token: write # mandatory for trusted publishing
|
id-token: write # mandatory for trusted publishing
|
||||||
|
|||||||
279
.github/workflows/release.yml
vendored
279
.github/workflows/release.yml
vendored
@@ -14,6 +14,10 @@ on:
|
|||||||
required: false
|
required: false
|
||||||
default: ''
|
default: ''
|
||||||
type: string
|
type: string
|
||||||
|
linux_armv7l:
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
prerelease:
|
prerelease:
|
||||||
required: false
|
required: false
|
||||||
default: true
|
default: true
|
||||||
@@ -43,6 +47,10 @@ on:
|
|||||||
required: false
|
required: false
|
||||||
default: ''
|
default: ''
|
||||||
type: string
|
type: string
|
||||||
|
linux_armv7l:
|
||||||
|
description: Include linux_armv7l
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
prerelease:
|
prerelease:
|
||||||
description: Pre-release
|
description: Pre-release
|
||||||
default: false
|
default: false
|
||||||
@@ -71,141 +79,63 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: "3.10" # Keep this in sync with test-workflows.yml
|
||||||
|
|
||||||
- name: Process inputs
|
- name: Process inputs
|
||||||
id: process_inputs
|
id: process_inputs
|
||||||
|
env:
|
||||||
|
INPUTS: ${{ toJSON(inputs) }}
|
||||||
run: |
|
run: |
|
||||||
cat << EOF
|
python -m devscripts.setup_variables process_inputs
|
||||||
::group::Inputs
|
|
||||||
prerelease=${{ inputs.prerelease }}
|
|
||||||
source=${{ inputs.source }}
|
|
||||||
target=${{ inputs.target }}
|
|
||||||
version=${{ inputs.version }}
|
|
||||||
::endgroup::
|
|
||||||
EOF
|
|
||||||
IFS='@' read -r source_repo source_tag <<<"${{ inputs.source }}"
|
|
||||||
IFS='@' read -r target_repo target_tag <<<"${{ inputs.target }}"
|
|
||||||
cat << EOF >> "$GITHUB_OUTPUT"
|
|
||||||
source_repo=${source_repo}
|
|
||||||
source_tag=${source_tag}
|
|
||||||
target_repo=${target_repo}
|
|
||||||
target_tag=${target_tag}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
- name: Setup variables
|
- name: Setup variables
|
||||||
id: setup_variables
|
id: setup_variables
|
||||||
env:
|
env:
|
||||||
source_repo: ${{ steps.process_inputs.outputs.source_repo }}
|
INPUTS: ${{ toJSON(inputs) }}
|
||||||
source_tag: ${{ steps.process_inputs.outputs.source_tag }}
|
PROCESSED: ${{ toJSON(steps.process_inputs.outputs) }}
|
||||||
target_repo: ${{ steps.process_inputs.outputs.target_repo }}
|
REPOSITORY: ${{ github.repository }}
|
||||||
target_tag: ${{ steps.process_inputs.outputs.target_tag }}
|
PUSH_VERSION_COMMIT: ${{ vars.PUSH_VERSION_COMMIT }}
|
||||||
|
PYPI_PROJECT: ${{ vars.PYPI_PROJECT }}
|
||||||
|
SOURCE_PYPI_PROJECT: ${{ vars[format('{0}_pypi_project', steps.process_inputs.outputs.source_repo)] }}
|
||||||
|
SOURCE_PYPI_SUFFIX: ${{ vars[format('{0}_pypi_suffix', steps.process_inputs.outputs.source_repo)] }}
|
||||||
|
TARGET_PYPI_PROJECT: ${{ vars[format('{0}_pypi_project', steps.process_inputs.outputs.target_repo)] }}
|
||||||
|
TARGET_PYPI_SUFFIX: ${{ vars[format('{0}_pypi_suffix', steps.process_inputs.outputs.target_repo)] }}
|
||||||
|
SOURCE_ARCHIVE_REPO: ${{ vars[format('{0}_archive_repo', steps.process_inputs.outputs.source_repo)] }}
|
||||||
|
TARGET_ARCHIVE_REPO: ${{ vars[format('{0}_archive_repo', steps.process_inputs.outputs.target_repo)] }}
|
||||||
|
HAS_SOURCE_ARCHIVE_REPO_TOKEN: ${{ !!secrets[format('{0}_archive_repo_token', steps.process_inputs.outputs.source_repo)] }}
|
||||||
|
HAS_TARGET_ARCHIVE_REPO_TOKEN: ${{ !!secrets[format('{0}_archive_repo_token', steps.process_inputs.outputs.target_repo)] }}
|
||||||
|
HAS_ARCHIVE_REPO_TOKEN: ${{ !!secrets.ARCHIVE_REPO_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
# unholy bash monstrosity (sincere apologies)
|
python -m devscripts.setup_variables
|
||||||
fallback_token () {
|
|
||||||
if ${{ !secrets.ARCHIVE_REPO_TOKEN }}; then
|
|
||||||
echo "::error::Repository access secret ${target_repo_token^^} not found"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
target_repo_token=ARCHIVE_REPO_TOKEN
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
source_is_channel=0
|
- name: Update version & documentation
|
||||||
[[ "${source_repo}" == 'stable' ]] && source_repo='yt-dlp/yt-dlp'
|
|
||||||
if [[ -z "${source_repo}" ]]; then
|
|
||||||
source_repo='${{ github.repository }}'
|
|
||||||
elif [[ '${{ vars[format('{0}_archive_repo', env.source_repo)] }}' ]]; then
|
|
||||||
source_is_channel=1
|
|
||||||
source_channel='${{ vars[format('{0}_archive_repo', env.source_repo)] }}'
|
|
||||||
elif [[ -z "${source_tag}" && "${source_repo}" != */* ]]; then
|
|
||||||
source_tag="${source_repo}"
|
|
||||||
source_repo='${{ github.repository }}'
|
|
||||||
fi
|
|
||||||
resolved_source="${source_repo}"
|
|
||||||
if [[ "${source_tag}" ]]; then
|
|
||||||
resolved_source="${resolved_source}@${source_tag}"
|
|
||||||
elif [[ "${source_repo}" == 'yt-dlp/yt-dlp' ]]; then
|
|
||||||
resolved_source='stable'
|
|
||||||
fi
|
|
||||||
|
|
||||||
revision="${{ (inputs.prerelease || !vars.PUSH_VERSION_COMMIT) && '$(date -u +"%H%M%S")' || '' }}"
|
|
||||||
version="$(
|
|
||||||
python devscripts/update-version.py \
|
|
||||||
-c "${resolved_source}" -r "${{ github.repository }}" ${{ inputs.version || '$revision' }} | \
|
|
||||||
grep -Po "version=\K\d+\.\d+\.\d+(\.\d+)?")"
|
|
||||||
|
|
||||||
if [[ "${target_repo}" ]]; then
|
|
||||||
if [[ -z "${target_tag}" ]]; then
|
|
||||||
if [[ '${{ vars[format('{0}_archive_repo', env.target_repo)] }}' ]]; then
|
|
||||||
target_tag="${source_tag:-${version}}"
|
|
||||||
else
|
|
||||||
target_tag="${target_repo}"
|
|
||||||
target_repo='${{ github.repository }}'
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
if [[ "${target_repo}" != '${{ github.repository}}' ]]; then
|
|
||||||
target_repo='${{ vars[format('{0}_archive_repo', env.target_repo)] }}'
|
|
||||||
target_repo_token='${{ env.target_repo }}_archive_repo_token'
|
|
||||||
${{ !!secrets[format('{0}_archive_repo_token', env.target_repo)] }} || fallback_token
|
|
||||||
pypi_project='${{ vars[format('{0}_pypi_project', env.target_repo)] }}'
|
|
||||||
pypi_suffix='${{ vars[format('{0}_pypi_suffix', env.target_repo)] }}'
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
target_tag="${source_tag:-${version}}"
|
|
||||||
if ((source_is_channel)); then
|
|
||||||
target_repo="${source_channel}"
|
|
||||||
target_repo_token='${{ env.source_repo }}_archive_repo_token'
|
|
||||||
${{ !!secrets[format('{0}_archive_repo_token', env.source_repo)] }} || fallback_token
|
|
||||||
pypi_project='${{ vars[format('{0}_pypi_project', env.source_repo)] }}'
|
|
||||||
pypi_suffix='${{ vars[format('{0}_pypi_suffix', env.source_repo)] }}'
|
|
||||||
else
|
|
||||||
target_repo='${{ github.repository }}'
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "${target_repo}" == '${{ github.repository }}' ]] && ${{ !inputs.prerelease }}; then
|
|
||||||
pypi_project='${{ vars.PYPI_PROJECT }}'
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "::group::Output variables"
|
|
||||||
cat << EOF | tee -a "$GITHUB_OUTPUT"
|
|
||||||
channel=${resolved_source}
|
|
||||||
version=${version}
|
|
||||||
target_repo=${target_repo}
|
|
||||||
target_repo_token=${target_repo_token}
|
|
||||||
target_tag=${target_tag}
|
|
||||||
pypi_project=${pypi_project}
|
|
||||||
pypi_suffix=${pypi_suffix}
|
|
||||||
EOF
|
|
||||||
echo "::endgroup::"
|
|
||||||
|
|
||||||
- name: Update documentation
|
|
||||||
env:
|
env:
|
||||||
version: ${{ steps.setup_variables.outputs.version }}
|
CHANNEL: ${{ steps.setup_variables.outputs.channel }}
|
||||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
# Use base repo since this could be committed; build jobs will call this again with true origin
|
||||||
if: |
|
REPOSITORY: ${{ github.repository }}
|
||||||
!inputs.prerelease && env.target_repo == github.repository
|
VERSION: ${{ steps.setup_variables.outputs.version }}
|
||||||
run: |
|
run: |
|
||||||
|
python devscripts/update-version.py -c "${CHANNEL}" -r "${REPOSITORY}" "${VERSION}"
|
||||||
python devscripts/update_changelog.py -vv
|
python devscripts/update_changelog.py -vv
|
||||||
make doc
|
make doc
|
||||||
|
|
||||||
- name: Push to release
|
- name: Push to release
|
||||||
id: push_release
|
id: push_release
|
||||||
env:
|
env:
|
||||||
version: ${{ steps.setup_variables.outputs.version }}
|
VERSION: ${{ steps.setup_variables.outputs.version }}
|
||||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
GITHUB_EVENT_SENDER_LOGIN: ${{ github.event.sender.login }}
|
||||||
|
GITHUB_EVENT_REF: ${{ github.event.ref }}
|
||||||
if: |
|
if: |
|
||||||
!inputs.prerelease && env.target_repo == github.repository
|
!inputs.prerelease && steps.setup_variables.outputs.target_repo == github.repository
|
||||||
run: |
|
run: |
|
||||||
git config --global user.name "github-actions[bot]"
|
git config --global user.name "github-actions[bot]"
|
||||||
git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||||
git add -u
|
git add -u
|
||||||
git commit -m "Release ${{ env.version }}" \
|
git commit -m "Release ${VERSION}" \
|
||||||
-m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all"
|
-m "Created by: ${GITHUB_EVENT_SENDER_LOGIN}" -m ":ci skip all"
|
||||||
git push origin --force ${{ github.event.ref }}:release
|
git push origin --force "${GITHUB_EVENT_REF}:release"
|
||||||
|
|
||||||
- name: Get target commitish
|
- name: Get target commitish
|
||||||
id: get_target
|
id: get_target
|
||||||
@@ -214,10 +144,10 @@ jobs:
|
|||||||
|
|
||||||
- name: Update master
|
- name: Update master
|
||||||
env:
|
env:
|
||||||
target_repo: ${{ steps.setup_variables.outputs.target_repo }}
|
GITHUB_EVENT_REF: ${{ github.event.ref }}
|
||||||
if: |
|
if: |
|
||||||
vars.PUSH_VERSION_COMMIT != '' && !inputs.prerelease && env.target_repo == github.repository
|
vars.PUSH_VERSION_COMMIT && !inputs.prerelease && steps.setup_variables.outputs.target_repo == github.repository
|
||||||
run: git push origin ${{ github.event.ref }}
|
run: git push origin "${GITHUB_EVENT_REF}"
|
||||||
|
|
||||||
build:
|
build:
|
||||||
needs: prepare
|
needs: prepare
|
||||||
@@ -226,10 +156,9 @@ jobs:
|
|||||||
version: ${{ needs.prepare.outputs.version }}
|
version: ${{ needs.prepare.outputs.version }}
|
||||||
channel: ${{ needs.prepare.outputs.channel }}
|
channel: ${{ needs.prepare.outputs.channel }}
|
||||||
origin: ${{ needs.prepare.outputs.target_repo }}
|
origin: ${{ needs.prepare.outputs.target_repo }}
|
||||||
|
linux_armv7l: ${{ inputs.linux_armv7l }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write # For package cache
|
|
||||||
actions: write # For cleaning up cache
|
|
||||||
secrets:
|
secrets:
|
||||||
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
||||||
|
|
||||||
@@ -244,7 +173,7 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: "3.10"
|
||||||
|
|
||||||
@@ -255,16 +184,16 @@ jobs:
|
|||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
env:
|
env:
|
||||||
version: ${{ needs.prepare.outputs.version }}
|
VERSION: ${{ needs.prepare.outputs.version }}
|
||||||
suffix: ${{ needs.prepare.outputs.pypi_suffix }}
|
SUFFIX: ${{ needs.prepare.outputs.pypi_suffix }}
|
||||||
channel: ${{ needs.prepare.outputs.channel }}
|
CHANNEL: ${{ needs.prepare.outputs.channel }}
|
||||||
target_repo: ${{ needs.prepare.outputs.target_repo }}
|
TARGET_REPO: ${{ needs.prepare.outputs.target_repo }}
|
||||||
pypi_project: ${{ needs.prepare.outputs.pypi_project }}
|
PYPI_PROJECT: ${{ needs.prepare.outputs.pypi_project }}
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
|
python devscripts/update-version.py -c "${CHANNEL}" -r "${TARGET_REPO}" -s "${SUFFIX}" "${VERSION}"
|
||||||
python devscripts/update_changelog.py -vv
|
python devscripts/update_changelog.py -vv
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml
|
sed -i -E '0,/(name = ")[^"]+(")/s//\1'"${PYPI_PROJECT}"'\2/' pyproject.toml
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
@@ -298,7 +227,11 @@ jobs:
|
|||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
TARGET_REPO: ${{ needs.prepare.outputs.target_repo }}
|
||||||
|
TARGET_TAG: ${{ needs.prepare.outputs.target_tag }}
|
||||||
|
VERSION: ${{ needs.prepare.outputs.version }}
|
||||||
|
HEAD_SHA: ${{ needs.prepare.outputs.head_sha }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
@@ -308,87 +241,85 @@ jobs:
|
|||||||
path: artifact
|
path: artifact
|
||||||
pattern: build-*
|
pattern: build-*
|
||||||
merge-multiple: true
|
merge-multiple: true
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: "3.10"
|
||||||
|
|
||||||
- name: Generate release notes
|
- name: Generate release notes
|
||||||
env:
|
env:
|
||||||
head_sha: ${{ needs.prepare.outputs.head_sha }}
|
REPOSITORY: ${{ github.repository }}
|
||||||
target_repo: ${{ needs.prepare.outputs.target_repo }}
|
BASE_REPO: yt-dlp/yt-dlp
|
||||||
target_tag: ${{ needs.prepare.outputs.target_tag }}
|
NIGHTLY_REPO: yt-dlp/yt-dlp-nightly-builds
|
||||||
|
MASTER_REPO: yt-dlp/yt-dlp-master-builds
|
||||||
|
DOCS_PATH: ${{ env.TARGET_REPO == github.repository && format('/tree/{0}', env.TARGET_TAG) || '' }}
|
||||||
run: |
|
run: |
|
||||||
printf '%s' \
|
printf '%s' \
|
||||||
'[]' \
|
"[]" \
|
||||||
'(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
|
"(https://github.com/${REPOSITORY}#installation \"Installation instructions\") " \
|
||||||
'[]' \
|
"[]" \
|
||||||
'(https://discord.gg/H5MNcFW63r "Discord") ' \
|
"(https://discord.gg/H5MNcFW63r \"Discord\") " \
|
||||||
'[]' \
|
"[]" \
|
||||||
'(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
|
"(https://github.com/${BASE_REPO}/blob/master/Collaborators.md#collaborators \"Donate\") " \
|
||||||
'[]' \
|
"[]" \
|
||||||
'(https://github.com/${{ github.repository }}' \
|
"(https://github.com/${REPOSITORY}${DOCS_PATH}#readme \"Documentation\") " > ./RELEASE_NOTES
|
||||||
'${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
|
if [[ "${TARGET_REPO}" == "${BASE_REPO}" ]]; then
|
||||||
${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
|
printf '%s' \
|
||||||
"[]" \
|
"[]" \
|
||||||
"(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
|
"(https://github.com/${NIGHTLY_REPO}/releases/latest \"Nightly builds\") " \
|
||||||
"[]" \
|
"[]" \
|
||||||
"(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
|
"(https://github.com/${MASTER_REPO}/releases/latest \"Master builds\")" >> ./RELEASE_NOTES
|
||||||
printf '\n\n' >> ./RELEASE_NOTES
|
fi
|
||||||
cat >> ./RELEASE_NOTES << EOF
|
printf '\n\n%s\n\n%s%s\n\n---\n' \
|
||||||
#### A description of the various files is in the [README](https://github.com/${{ github.repository }}#release-files)
|
"#### A description of the various files is in the [README](https://github.com/${REPOSITORY}#release-files)" \
|
||||||
---
|
"The PyInstaller-bundled executables are subject to the licenses described in " \
|
||||||
$(python ./devscripts/make_changelog.py -vv --collapsible)
|
"[THIRD_PARTY_LICENSES.txt](https://github.com/${BASE_REPO}/blob/${HEAD_SHA}/THIRD_PARTY_LICENSES.txt)" >> ./RELEASE_NOTES
|
||||||
EOF
|
python ./devscripts/make_changelog.py -vv --collapsible >> ./RELEASE_NOTES
|
||||||
printf '%s\n\n' '**This is a pre-release build**' >> ./PRERELEASE_NOTES
|
printf '%s\n\n' '**This is a pre-release build**' >> ./PRERELEASE_NOTES
|
||||||
cat ./RELEASE_NOTES >> ./PRERELEASE_NOTES
|
cat ./RELEASE_NOTES >> ./PRERELEASE_NOTES
|
||||||
printf '%s\n\n' 'Generated from: https://github.com/${{ github.repository }}/commit/${{ env.head_sha }}' >> ./ARCHIVE_NOTES
|
printf '%s\n\n' "Generated from: https://github.com/${REPOSITORY}/commit/${HEAD_SHA}" >> ./ARCHIVE_NOTES
|
||||||
cat ./RELEASE_NOTES >> ./ARCHIVE_NOTES
|
cat ./RELEASE_NOTES >> ./ARCHIVE_NOTES
|
||||||
|
|
||||||
- name: Publish to archive repo
|
- name: Publish to archive repo
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ secrets[needs.prepare.outputs.target_repo_token] }}
|
GH_TOKEN: ${{ secrets[needs.prepare.outputs.target_repo_token] }}
|
||||||
GH_REPO: ${{ needs.prepare.outputs.target_repo }}
|
GH_REPO: ${{ needs.prepare.outputs.target_repo }}
|
||||||
version: ${{ needs.prepare.outputs.version }}
|
TITLE_PREFIX: ${{ startswith(env.TARGET_REPO, 'yt-dlp/') && 'yt-dlp ' || '' }}
|
||||||
channel: ${{ needs.prepare.outputs.channel }}
|
TITLE: ${{ inputs.target != env.TARGET_REPO && inputs.target || needs.prepare.outputs.channel }}
|
||||||
if: |
|
if: |
|
||||||
inputs.prerelease && env.GH_TOKEN != '' && env.GH_REPO != '' && env.GH_REPO != github.repository
|
inputs.prerelease && env.GH_TOKEN && env.GH_REPO && env.GH_REPO != github.repository
|
||||||
run: |
|
run: |
|
||||||
title="${{ startswith(env.GH_REPO, 'yt-dlp/') && 'yt-dlp ' || '' }}${{ env.channel }}"
|
|
||||||
gh release create \
|
gh release create \
|
||||||
--notes-file ARCHIVE_NOTES \
|
--notes-file ARCHIVE_NOTES \
|
||||||
--title "${title} ${{ env.version }}" \
|
--title "${TITLE_PREFIX}${TITLE} ${VERSION}" \
|
||||||
${{ env.version }} \
|
"${VERSION}" \
|
||||||
artifact/*
|
artifact/*
|
||||||
|
|
||||||
- name: Prune old release
|
- name: Prune old release
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ github.token }}
|
||||||
version: ${{ needs.prepare.outputs.version }}
|
|
||||||
target_repo: ${{ needs.prepare.outputs.target_repo }}
|
|
||||||
target_tag: ${{ needs.prepare.outputs.target_tag }}
|
|
||||||
if: |
|
if: |
|
||||||
env.target_repo == github.repository && env.target_tag != env.version
|
env.TARGET_REPO == github.repository && env.TARGET_TAG != env.VERSION
|
||||||
run: |
|
run: |
|
||||||
gh release delete --yes --cleanup-tag "${{ env.target_tag }}" || true
|
gh release delete --yes --cleanup-tag "${TARGET_TAG}" || true
|
||||||
git tag --delete "${{ env.target_tag }}" || true
|
git tag --delete "${TARGET_TAG}" || true
|
||||||
sleep 5 # Enough time to cover deletion race condition
|
sleep 5 # Enough time to cover deletion race condition
|
||||||
|
|
||||||
- name: Publish release
|
- name: Publish release
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ github.token }}
|
GH_TOKEN: ${{ github.token }}
|
||||||
version: ${{ needs.prepare.outputs.version }}
|
NOTES_FILE: ${{ inputs.prerelease && 'PRERELEASE_NOTES' || 'RELEASE_NOTES' }}
|
||||||
target_repo: ${{ needs.prepare.outputs.target_repo }}
|
TITLE_PREFIX: ${{ github.repository == 'yt-dlp/yt-dlp' && 'yt-dlp ' || '' }}
|
||||||
target_tag: ${{ needs.prepare.outputs.target_tag }}
|
TITLE: ${{ env.TARGET_TAG != env.VERSION && format('{0} ', env.TARGET_TAG) || '' }}
|
||||||
head_sha: ${{ needs.prepare.outputs.head_sha }}
|
PRERELEASE: ${{ inputs.prerelease && '1' || '0' }}
|
||||||
if: |
|
if: |
|
||||||
env.target_repo == github.repository
|
env.TARGET_REPO == github.repository
|
||||||
run: |
|
run: |
|
||||||
title="${{ github.repository == 'yt-dlp/yt-dlp' && 'yt-dlp ' || '' }}"
|
gh_options=(
|
||||||
title+="${{ env.target_tag != env.version && format('{0} ', env.target_tag) || '' }}"
|
--notes-file "${NOTES_FILE}"
|
||||||
gh release create \
|
--target "${HEAD_SHA}"
|
||||||
--notes-file ${{ inputs.prerelease && 'PRERELEASE_NOTES' || 'RELEASE_NOTES' }} \
|
--title "${TITLE_PREFIX}${TITLE}${VERSION}"
|
||||||
--target ${{ env.head_sha }} \
|
)
|
||||||
--title "${title}${{ env.version }}" \
|
if ((PRERELEASE)); then
|
||||||
${{ inputs.prerelease && '--prerelease' || '' }} \
|
gh_options+=(--prerelease)
|
||||||
${{ env.target_tag }} \
|
fi
|
||||||
artifact/*
|
gh release create "${gh_options[@]}" "${TARGET_TAG}" artifact/*
|
||||||
|
|||||||
4
.github/workflows/signature-tests.yml
vendored
4
.github/workflows/signature-tests.yml
vendored
@@ -25,11 +25,11 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-latest, windows-latest]
|
os: [ubuntu-latest, windows-latest]
|
||||||
python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', pypy-3.11]
|
python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14-dev', pypy-3.11]
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python-version }}
|
python-version: ${{ matrix.python-version }}
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
|
|||||||
52
.github/workflows/test-workflows.yml
vendored
Normal file
52
.github/workflows/test-workflows.yml
vendored
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
name: Test and lint workflows
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- .github/workflows/*
|
||||||
|
- bundle/docker/linux/*.sh
|
||||||
|
- devscripts/setup_variables.py
|
||||||
|
- devscripts/setup_variables_tests.py
|
||||||
|
- devscripts/utils.py
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- .github/workflows/*
|
||||||
|
- bundle/docker/linux/*.sh
|
||||||
|
- devscripts/setup_variables.py
|
||||||
|
- devscripts/setup_variables_tests.py
|
||||||
|
- devscripts/utils.py
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
env:
|
||||||
|
ACTIONLINT_VERSION: "1.7.7"
|
||||||
|
ACTIONLINT_SHA256SUM: 023070a287cd8cccd71515fedc843f1985bf96c436b7effaecce67290e7e0757
|
||||||
|
ACTIONLINT_REPO: https://github.com/rhysd/actionlint
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check:
|
||||||
|
name: Check workflows
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: actions/setup-python@v6
|
||||||
|
with:
|
||||||
|
python-version: "3.10" # Keep this in sync with release.yml's prepare job
|
||||||
|
- name: Install requirements
|
||||||
|
env:
|
||||||
|
ACTIONLINT_TARBALL: ${{ format('actionlint_{0}_linux_amd64.tar.gz', env.ACTIONLINT_VERSION) }}
|
||||||
|
run: |
|
||||||
|
python -m devscripts.install_deps -o --include test
|
||||||
|
sudo apt -y install shellcheck
|
||||||
|
python -m pip install -U pyflakes
|
||||||
|
curl -LO "${ACTIONLINT_REPO}/releases/download/v${ACTIONLINT_VERSION}/${ACTIONLINT_TARBALL}"
|
||||||
|
printf '%s %s' "${ACTIONLINT_SHA256SUM}" "${ACTIONLINT_TARBALL}" | sha256sum -c -
|
||||||
|
tar xvzf "${ACTIONLINT_TARBALL}" actionlint
|
||||||
|
chmod +x actionlint
|
||||||
|
- name: Run actionlint
|
||||||
|
run: |
|
||||||
|
./actionlint -color
|
||||||
|
- name: Check Docker shell scripts
|
||||||
|
run: |
|
||||||
|
shellcheck bundle/docker/linux/*.sh
|
||||||
|
- name: Test GHA devscripts
|
||||||
|
run: |
|
||||||
|
pytest -Werror --tb=short --color=yes devscripts/setup_variables_tests.py
|
||||||
@@ -12,6 +12,7 @@
|
|||||||
- [Is your question about yt-dlp?](#is-your-question-about-yt-dlp)
|
- [Is your question about yt-dlp?](#is-your-question-about-yt-dlp)
|
||||||
- [Are you willing to share account details if needed?](#are-you-willing-to-share-account-details-if-needed)
|
- [Are you willing to share account details if needed?](#are-you-willing-to-share-account-details-if-needed)
|
||||||
- [Is the website primarily used for piracy](#is-the-website-primarily-used-for-piracy)
|
- [Is the website primarily used for piracy](#is-the-website-primarily-used-for-piracy)
|
||||||
|
- [AUTOMATED CONTRIBUTIONS (AI / LLM) POLICY](#automated-contributions-ai--llm-policy)
|
||||||
- [DEVELOPER INSTRUCTIONS](#developer-instructions)
|
- [DEVELOPER INSTRUCTIONS](#developer-instructions)
|
||||||
- [Adding new feature or making overarching changes](#adding-new-feature-or-making-overarching-changes)
|
- [Adding new feature or making overarching changes](#adding-new-feature-or-making-overarching-changes)
|
||||||
- [Adding support for a new site](#adding-support-for-a-new-site)
|
- [Adding support for a new site](#adding-support-for-a-new-site)
|
||||||
@@ -134,6 +135,17 @@ While these steps won't necessarily ensure that no misuse of the account takes p
|
|||||||
We follow [youtube-dl's policy](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) to not support services that is primarily used for infringing copyright. Additionally, it has been decided to not to support porn sites that specialize in fakes. We also cannot support any service that serves only [DRM protected content](https://en.wikipedia.org/wiki/Digital_rights_management).
|
We follow [youtube-dl's policy](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) to not support services that is primarily used for infringing copyright. Additionally, it has been decided to not to support porn sites that specialize in fakes. We also cannot support any service that serves only [DRM protected content](https://en.wikipedia.org/wiki/Digital_rights_management).
|
||||||
|
|
||||||
|
|
||||||
|
# AUTOMATED CONTRIBUTIONS (AI / LLM) POLICY
|
||||||
|
|
||||||
|
Please refrain from submitting issues or pull requests that have been generated by an LLM or other fully-automated tools. Any submission that is in violation of this policy will be closed, and the submitter may be blocked from this repository without warning.
|
||||||
|
|
||||||
|
If you submit an issue, you need to understand what your issue description is saying. You need to be able to answer questions about your bug report or feature request. Using an AI tool to *proofread* your issue/comment text is acceptable. Using an AI tool to *write* your issue/comment text is unacceptable.
|
||||||
|
|
||||||
|
If you submit a pull request, you need to understand what every line of code you've changed does. If you can't explain why your PR is doing something, then do not submit it. Using an AI tool to generate entire lines of code is unacceptable.
|
||||||
|
|
||||||
|
The rationale behind this policy is that automated contributions are a waste of the maintainers' time. Humans spend their time and brainpower reviewing every submission. Issues or pull requests generated by automation tools create an imbalance of effort between the submitter and the reviewer. Nobody learns anything when a maintainer reviews code written by an LLM.
|
||||||
|
|
||||||
|
Additionally, AI-generated code conflicts with this project's license (Unlicense), since you cannot truly release code into the public domain if you didn't author it yourself.
|
||||||
|
|
||||||
|
|
||||||
# DEVELOPER INSTRUCTIONS
|
# DEVELOPER INSTRUCTIONS
|
||||||
@@ -768,12 +780,10 @@ view_count = int_or_none(video.get('views'))
|
|||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
# My pull request is labeled pending-fixes
|
## My pull request is labeled pending-fixes
|
||||||
|
|
||||||
The `pending-fixes` label is added when there are changes requested to a PR. When the necessary changes are made, the label should be removed. However, despite our best efforts, it may sometimes happen that the maintainer did not see the changes or forgot to remove the label. If your PR is still marked as `pending-fixes` a few days after all requested changes have been made, feel free to ping the maintainer who labeled your issue and ask them to re-review and remove the label.
|
The `pending-fixes` label is added when there are changes requested to a PR. When the necessary changes are made, the label should be removed. However, despite our best efforts, it may sometimes happen that the maintainer did not see the changes or forgot to remove the label. If your PR is still marked as `pending-fixes` a few days after all requested changes have been made, feel free to ping the maintainer who labeled your issue and ask them to re-review and remove the label.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# EMBEDDING YT-DLP
|
# EMBEDDING YT-DLP
|
||||||
See [README.md#embedding-yt-dlp](README.md#embedding-yt-dlp) for instructions on how to embed yt-dlp in another Python program
|
See [README.md#embedding-yt-dlp](README.md#embedding-yt-dlp) for instructions on how to embed yt-dlp in another Python program
|
||||||
|
|||||||
11
CONTRIBUTORS
11
CONTRIBUTORS
@@ -800,3 +800,14 @@ iribeirocampos
|
|||||||
rolandcrosby
|
rolandcrosby
|
||||||
Sojiroh
|
Sojiroh
|
||||||
tchebb
|
tchebb
|
||||||
|
AzartX47
|
||||||
|
e2dk4r
|
||||||
|
junyilou
|
||||||
|
PierreMesure
|
||||||
|
Randalix
|
||||||
|
runarmod
|
||||||
|
gitchasing
|
||||||
|
zakaryan2004
|
||||||
|
cdce8p
|
||||||
|
nicolaasjan
|
||||||
|
willsmillie
|
||||||
|
|||||||
167
Changelog.md
167
Changelog.md
@@ -4,13 +4,178 @@
|
|||||||
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
### 2025.09.26
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- **twitch**: vod: [Fix `live_status` detection](https://github.com/yt-dlp/yt-dlp/commit/50e452fd7dfb8a648bd3b9aaabc8f94f37ce2051) ([#14457](https://github.com/yt-dlp/yt-dlp/issues/14457)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **youtube**
|
||||||
|
- [Fix player JS overrides](https://github.com/yt-dlp/yt-dlp/commit/b7b7910d96359a539b7997890342ab4a59dd685d) ([#14430](https://github.com/yt-dlp/yt-dlp/issues/14430)) by [bashonly](https://github.com/bashonly), [seproDev](https://github.com/seproDev)
|
||||||
|
- [Improve PO token logging](https://github.com/yt-dlp/yt-dlp/commit/7df5acc546dccd32213c3a125d721e32b06d71b0) ([#14447](https://github.com/yt-dlp/yt-dlp/issues/14447)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Player client maintenance](https://github.com/yt-dlp/yt-dlp/commit/94c5622be96474ca3c637e52898c4daee4d8fb69) ([#14448](https://github.com/yt-dlp/yt-dlp/issues/14448)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Replace `tv_simply` with `web_safari` in default clients](https://github.com/yt-dlp/yt-dlp/commit/12b57d2858845c0c7fb33bf9aa8ed7be6905535d) ([#14465](https://github.com/yt-dlp/yt-dlp/issues/14465)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
### 2025.09.23
|
||||||
|
|
||||||
|
#### Important changes
|
||||||
|
- **Several options have been deprecated**
|
||||||
|
In order to simplify the codebase and reduce maintenance burden, various options have been deprecated. Please remove them from your commands/configurations. [Read more](https://github.com/yt-dlp/yt-dlp/issues/14198)
|
||||||
|
|
||||||
|
#### Core changes
|
||||||
|
- **compat**: [Add `compat_datetime_from_timestamp`](https://github.com/yt-dlp/yt-dlp/commit/6a763a55d8a93b2a964ecf7699248ad342485412) ([#11902](https://github.com/yt-dlp/yt-dlp/issues/11902)) by [pzhlkj6612](https://github.com/pzhlkj6612), [seproDev](https://github.com/seproDev)
|
||||||
|
- **utils**
|
||||||
|
- `mimetype2ext`: [Recognize `vnd.dlna.mpeg-tts`](https://github.com/yt-dlp/yt-dlp/commit/98b6b0d339130e955f9d45ce67c0357c633c1627) ([#14388](https://github.com/yt-dlp/yt-dlp/issues/14388)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- `random_user_agent`: [Bump versions](https://github.com/yt-dlp/yt-dlp/commit/f3829463c728a5b5e62b3fc157e71c99b26edac7) ([#14317](https://github.com/yt-dlp/yt-dlp/issues/14317)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- **10play**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/067062bb87ac057e453ce9efdac7ca117a6a7da0) ([#14242](https://github.com/yt-dlp/yt-dlp/issues/14242)) by [Sipherdrakon](https://github.com/Sipherdrakon)
|
||||||
|
- **applepodcast**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/b2c01d0498653e0239c7226c5a7fcb614dd4dbc8) ([#14372](https://github.com/yt-dlp/yt-dlp/issues/14372)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **loco**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/f5cb721185e8725cf4eb4080e86aa9aa73ef25b3) ([#14256](https://github.com/yt-dlp/yt-dlp/issues/14256)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **mitele**: [Remove extractor](https://github.com/yt-dlp/yt-dlp/commit/820c6e244571557fcfc127d4b3680e2d07c04dca) ([#14348](https://github.com/yt-dlp/yt-dlp/issues/14348)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **newspicks**: [Warn when only preview is available](https://github.com/yt-dlp/yt-dlp/commit/9def9a4b0e958285e055eb350e5dd43b5c423336) ([#14197](https://github.com/yt-dlp/yt-dlp/issues/14197)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **onsen**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/17bfaa53edf5c52fce73cf0cef4592f929c2462d) ([#10971](https://github.com/yt-dlp/yt-dlp/issues/10971)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **pixivsketch**: [Remove extractors](https://github.com/yt-dlp/yt-dlp/commit/3d9a88bd8ef149d781c7e569e48e61551eda395e) ([#14196](https://github.com/yt-dlp/yt-dlp/issues/14196)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **smotrim**: [Rework extractors](https://github.com/yt-dlp/yt-dlp/commit/8cb037c0b06c2815080f87d61ea2e95c412785fc) ([#14200](https://github.com/yt-dlp/yt-dlp/issues/14200)) by [doe1080](https://github.com/doe1080), [swayll](https://github.com/swayll)
|
||||||
|
- **telecinco**: [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/e123a48f1155703d8709a4221a42bd45c0a2b3ce) ([#14351](https://github.com/yt-dlp/yt-dlp/issues/14351)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **tiktok**: live: [Fix room ID extraction](https://github.com/yt-dlp/yt-dlp/commit/5c1abcdc49b9d23e1dcb77b95d063cf2bf93e352) ([#14287](https://github.com/yt-dlp/yt-dlp/issues/14287)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **ttinglive**: [Adapt FlexTV extractor to new domain](https://github.com/yt-dlp/yt-dlp/commit/4bc19adc8798e7564513898cf34adc432c6c5709) ([#14375](https://github.com/yt-dlp/yt-dlp/issues/14375)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **tunein**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/7d9e48b22a780c2e8d2d2d68940d49fd2029ab70) ([#13981](https://github.com/yt-dlp/yt-dlp/issues/13981)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **twitch**: clips: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/f8750504c2f71b54586fb857d60dce4e354a13ea) ([#14397](https://github.com/yt-dlp/yt-dlp/issues/14397)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **vimeo**: [Fix login error handling](https://github.com/yt-dlp/yt-dlp/commit/679587dac7cd011a1472255e1f06efb017ba91b6) ([#14280](https://github.com/yt-dlp/yt-dlp/issues/14280)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **vk**
|
||||||
|
- [Support vksport URLs](https://github.com/yt-dlp/yt-dlp/commit/b81e9272dce5844e8fba371cb4b4fd95ad3ed819) ([#14341](https://github.com/yt-dlp/yt-dlp/issues/14341)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- uservideos: [Support alternate URL format](https://github.com/yt-dlp/yt-dlp/commit/bf5d18016b03a3f2fd5d3494d9efe85d3f8beeac) ([#14376](https://github.com/yt-dlp/yt-dlp/issues/14376)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **xhamster**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/a1c98226a4e869a34cc764a9dcf7a4558516308e) ([#14286](https://github.com/yt-dlp/yt-dlp/issues/14286)) by [nicolaasjan](https://github.com/nicolaasjan), [willsmillie](https://github.com/willsmillie) (With fixes in [677997d](https://github.com/yt-dlp/yt-dlp/commit/677997d84eaec0037397f7d935386daa3025b004) by [arand](https://github.com/arand), [thegymguy](https://github.com/thegymguy))
|
||||||
|
- **youtube**: [Force player `0004de42`](https://github.com/yt-dlp/yt-dlp/commit/7f5d9f8543d19590eeec9473d54fa00151afa78a) ([#14398](https://github.com/yt-dlp/yt-dlp/issues/14398)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **build**
|
||||||
|
- [Fix cache warmer](https://github.com/yt-dlp/yt-dlp/commit/8597a4331e8535a246d777bb8397bdcab251766c) ([#14261](https://github.com/yt-dlp/yt-dlp/issues/14261)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Post-release workflow cleanup](https://github.com/yt-dlp/yt-dlp/commit/cd94e7004036e0149d7d3fa236c7dd44cf460788) ([#14250](https://github.com/yt-dlp/yt-dlp/issues/14250)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Refactor Linux build jobs](https://github.com/yt-dlp/yt-dlp/commit/e2d37bcc8e84be9ce0f67fc24cb830c13963d10f) ([#14275](https://github.com/yt-dlp/yt-dlp/issues/14275)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Use PyInstaller 6.16 for Windows](https://github.com/yt-dlp/yt-dlp/commit/df4b4e8ccf3385be6d2ad65465a0704c223dfdfb) ([#14318](https://github.com/yt-dlp/yt-dlp/issues/14318)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Use SPDX license identifier](https://github.com/yt-dlp/yt-dlp/commit/48a214bef4bfd5984362d3d24b09dce50ba449ea) ([#14260](https://github.com/yt-dlp/yt-dlp/issues/14260)) by [cdce8p](https://github.com/cdce8p)
|
||||||
|
- [Use new PyInstaller builds for Windows](https://github.com/yt-dlp/yt-dlp/commit/c8ede5f34d6c95c442b936bb01ecbcb724aefdef) ([#14273](https://github.com/yt-dlp/yt-dlp/issues/14273)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **ci**
|
||||||
|
- [Bump actions/setup-python to v6](https://github.com/yt-dlp/yt-dlp/commit/22ea0688ed6bcdbe4c51401a84239cda3decfc9c) ([#14282](https://github.com/yt-dlp/yt-dlp/issues/14282)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Improve workflow checks](https://github.com/yt-dlp/yt-dlp/commit/ae3923b6b23bc62115be55510d6b5842f7a46b5f) ([#14316](https://github.com/yt-dlp/yt-dlp/issues/14316)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Test and lint workflows](https://github.com/yt-dlp/yt-dlp/commit/7c9b10ebc83907d37f9f65ea9d4bd6f5e3bd1371) ([#14249](https://github.com/yt-dlp/yt-dlp/issues/14249)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Test with Python 3.14](https://github.com/yt-dlp/yt-dlp/commit/83b8409366d0f9554eaeae56394b244dab64a2cb) ([#13468](https://github.com/yt-dlp/yt-dlp/issues/13468)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **cleanup**
|
||||||
|
- [Bump ruff to 0.13.x](https://github.com/yt-dlp/yt-dlp/commit/ba8044685537e8e14adc6826fb4d730856fd2e2b) ([#14293](https://github.com/yt-dlp/yt-dlp/issues/14293)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Deprecate various options](https://github.com/yt-dlp/yt-dlp/commit/08d78996831bd8e1e3c2592d740c3def00bbf548) ([#13821](https://github.com/yt-dlp/yt-dlp/issues/13821)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Remove broken extractors](https://github.com/yt-dlp/yt-dlp/commit/65e90aea29cf3bfc9d1ae3e009fbf9a8db3a23c9) ([#14305](https://github.com/yt-dlp/yt-dlp/issues/14305)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Remove setup.cfg](https://github.com/yt-dlp/yt-dlp/commit/eb4b3a5fc7765a6cd0370ca44ccee0d7d5111dd7) ([#14314](https://github.com/yt-dlp/yt-dlp/issues/14314)) by [seproDev](https://github.com/seproDev) (With fixes in [8ab262c](https://github.com/yt-dlp/yt-dlp/commit/8ab262c66bd3e1d8874fb2d070068ba1f0d48f16) by [bashonly](https://github.com/bashonly))
|
||||||
|
- Miscellaneous: [2e81e29](https://github.com/yt-dlp/yt-dlp/commit/2e81e298cdce23afadb06a95836284acb38f7018) by [bashonly](https://github.com/bashonly), [doe1080](https://github.com/doe1080), [seproDev](https://github.com/seproDev)
|
||||||
|
- **docs**
|
||||||
|
- [Clarify license of PyInstaller-bundled executables](https://github.com/yt-dlp/yt-dlp/commit/e6e6b512141e66b1b36058966804fe59c02a2b4d) ([#14257](https://github.com/yt-dlp/yt-dlp/issues/14257)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Establish AI/LLM contribution policy](https://github.com/yt-dlp/yt-dlp/commit/8821682f15af59047bc1f92724ef8a9ba30d6f7e) ([#14194](https://github.com/yt-dlp/yt-dlp/issues/14194)) by [bashonly](https://github.com/bashonly), [seproDev](https://github.com/seproDev)
|
||||||
|
- **test**: utils: [Fix `sanitize_path` test for Windows CPython 3.11](https://github.com/yt-dlp/yt-dlp/commit/a183837ec8bb5e28fe6eb3a9d77ea2d0d7a106bd) ([#13878](https://github.com/yt-dlp/yt-dlp/issues/13878)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
|
||||||
|
### 2025.09.05
|
||||||
|
|
||||||
|
#### Core changes
|
||||||
|
- [Fix `--id` deprecation warning](https://github.com/yt-dlp/yt-dlp/commit/76bb46002c9a9655f2b1d29d4840e75e79037cfa) ([#14190](https://github.com/yt-dlp/yt-dlp/issues/14190)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- **charlierose**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/603acdff07f0226088916886002d2ad8309ff9d3) ([#14231](https://github.com/yt-dlp/yt-dlp/issues/14231)) by [gitchasing](https://github.com/gitchasing)
|
||||||
|
- **googledrive**: [Fix subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/18fe696df9d60804a8f5cb8cd74f38111d6eb711) ([#14139](https://github.com/yt-dlp/yt-dlp/issues/14139)) by [zakaryan2004](https://github.com/zakaryan2004)
|
||||||
|
- **itvbtcc**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0b51005b4819e7cea222fcbaf8e60391db4f732c) ([#14161](https://github.com/yt-dlp/yt-dlp/issues/14161)) by [garret1317](https://github.com/garret1317)
|
||||||
|
- **kick**: vod: [Support ongoing livestream VODs](https://github.com/yt-dlp/yt-dlp/commit/1e28f6bf743627b909135bb9a88537ad2deccaf0) ([#14154](https://github.com/yt-dlp/yt-dlp/issues/14154)) by [InvalidUsernameException](https://github.com/InvalidUsernameException)
|
||||||
|
- **lrt**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/ed24640943872c4cf30d7cc4601bec87b50ba03c) ([#14193](https://github.com/yt-dlp/yt-dlp/issues/14193)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **tver**: [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/223baa81f6637dcdef108f817180d8d1ae9fa213) ([#14165](https://github.com/yt-dlp/yt-dlp/issues/14165)) by [arabcoders](https://github.com/arabcoders)
|
||||||
|
- **vevo**: [Restore extractors](https://github.com/yt-dlp/yt-dlp/commit/d925e92b710153d0d51d030f115b3c87226bc0f0) ([#14203](https://github.com/yt-dlp/yt-dlp/issues/14203)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **build**: [Overhaul Linux builds and refactor release workflow](https://github.com/yt-dlp/yt-dlp/commit/50136eeeb3767289b236f140b759f23b39b00888) ([#13997](https://github.com/yt-dlp/yt-dlp/issues/13997)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
### 2025.08.27
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- **generic**
|
||||||
|
- [Simplify invalid URL error message](https://github.com/yt-dlp/yt-dlp/commit/1ddbd033f0fd65917526b1271cea66913ac8647f) ([#14167](https://github.com/yt-dlp/yt-dlp/issues/14167)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Use https as fallback protocol](https://github.com/yt-dlp/yt-dlp/commit/fec30c56f0e97e573ace659104ff0d72c4cc9809) ([#14160](https://github.com/yt-dlp/yt-dlp/issues/14160)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **skeb**: [Support wav files](https://github.com/yt-dlp/yt-dlp/commit/d6950c27af31908363c5c815e3b7eb4f9ff41643) ([#14147](https://github.com/yt-dlp/yt-dlp/issues/14147)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **youtube**
|
||||||
|
- [Add `tcc` player JS variant](https://github.com/yt-dlp/yt-dlp/commit/8f4a908300f55054bc96814bceeaa1034fdf4110) ([#14134](https://github.com/yt-dlp/yt-dlp/issues/14134)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Deprioritize `web_safari` m3u8 formats](https://github.com/yt-dlp/yt-dlp/commit/5c7ad68ff1643ad80d18cef8be9db8fcab05ee6c) ([#14168](https://github.com/yt-dlp/yt-dlp/issues/14168)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Player client maintenance](https://github.com/yt-dlp/yt-dlp/commit/3bd91544122142a87863d79e54e995c26cfd7f92) ([#14135](https://github.com/yt-dlp/yt-dlp/issues/14135)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Use alternative `tv` user-agent when authenticated](https://github.com/yt-dlp/yt-dlp/commit/8cd37b85d492edb56a4f7506ea05527b85a6b02b) ([#14169](https://github.com/yt-dlp/yt-dlp/issues/14169)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
### 2025.08.22
|
||||||
|
|
||||||
|
#### Core changes
|
||||||
|
- **cookies**: [Fix `--cookies-from-browser` with Firefox 142+](https://github.com/yt-dlp/yt-dlp/commit/f29acc4a6e73a9dc091686d40951288acae5a46d) ([#14114](https://github.com/yt-dlp/yt-dlp/issues/14114)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K) (With fixes in [526410b](https://github.com/yt-dlp/yt-dlp/commit/526410b4af9c1ca73aa3503cdaf4d32e42308fd6) by [bashonly](https://github.com/bashonly))
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- **mediaklikk**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4dbe96459d7e632d397826d0bb323f3f0ac8b057) ([#13975](https://github.com/yt-dlp/yt-dlp/issues/13975)) by [zhallgato](https://github.com/zhallgato)
|
||||||
|
- **steam**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/fcea3edb5c5648638357f27431500c0aaf08b147) ([#14093](https://github.com/yt-dlp/yt-dlp/issues/14093)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **youtube**
|
||||||
|
- [Improve `tv` client context](https://github.com/yt-dlp/yt-dlp/commit/39b7b8ddc7a4d0669e0cf39105c3bb84cb2736cc) ([#14122](https://github.com/yt-dlp/yt-dlp/issues/14122)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Optimize playback wait times](https://github.com/yt-dlp/yt-dlp/commit/5c8bcfdbc638dfde13e93157637d8521413ed774) ([#14124](https://github.com/yt-dlp/yt-dlp/issues/14124)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Replace `ios` with `tv_simply` in default clients](https://github.com/yt-dlp/yt-dlp/commit/895e762a834bbd729ab822c7d17329fdf815aaf2) ([#14123](https://github.com/yt-dlp/yt-dlp/issues/14123)) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- [Update `tv` client config](https://github.com/yt-dlp/yt-dlp/commit/a03c37b44ec8f50fd472c409115096f92410346d) ([#14101](https://github.com/yt-dlp/yt-dlp/issues/14101)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **build**: [Post-release workflow cleanup](https://github.com/yt-dlp/yt-dlp/commit/415b6d9ca868032a45b30b9139a50c5c06be2feb) ([#14090](https://github.com/yt-dlp/yt-dlp/issues/14090)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
### 2025.08.20
|
||||||
|
|
||||||
|
#### Core changes
|
||||||
|
- [Warn against using `-f mp4`](https://github.com/yt-dlp/yt-dlp/commit/70f56699515e0854a4853d214dce11b61d432387) ([#13915](https://github.com/yt-dlp/yt-dlp/issues/13915)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **utils**: [Add improved `jwt_encode` function](https://github.com/yt-dlp/yt-dlp/commit/35da8df4f843cb8f0656a301e5bebbf47d64d69a) ([#14071](https://github.com/yt-dlp/yt-dlp/issues/14071)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- [Extract avif storyboard formats from MPD manifests](https://github.com/yt-dlp/yt-dlp/commit/770119bdd15c525ba4338503f0eb68ea4baedf10) ([#14016](https://github.com/yt-dlp/yt-dlp/issues/14016)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- `_rta_search`: [Do not assume `age_limit` is `0`](https://github.com/yt-dlp/yt-dlp/commit/6ae3543d5a1feea0c546571fd2782b024c108eac) ([#13985](https://github.com/yt-dlp/yt-dlp/issues/13985)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **adobetv**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/c22660aed5fadb4ac29bdf25db4e8016414153cc) ([#13917](https://github.com/yt-dlp/yt-dlp/issues/13917)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **bilibili**: [Handle Bangumi redirection](https://github.com/yt-dlp/yt-dlp/commit/6ca9165648ac9a07c012de639faf50a97cbe0991) ([#14038](https://github.com/yt-dlp/yt-dlp/issues/14038)) by [grqz](https://github.com/grqz), [junyilou](https://github.com/junyilou)
|
||||||
|
- **faulio**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/74b4b3b00516e92a60250e0626272a6826459057) ([#13907](https://github.com/yt-dlp/yt-dlp/issues/13907)) by [CasperMcFadden95](https://github.com/CasperMcFadden95)
|
||||||
|
- **francetv**: site: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/7b8a8abb98165a53c026e2a3f52faee608df1f20) ([#14082](https://github.com/yt-dlp/yt-dlp/issues/14082)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **medialaan**: [Rework extractors](https://github.com/yt-dlp/yt-dlp/commit/86d74e5cf0e06c53c931ccdbdd497e3f2c4d2fe2) ([#14015](https://github.com/yt-dlp/yt-dlp/issues/14015)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **mtv**: [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/8df121ba59208979aa713822781891347abd03d1) ([#14052](https://github.com/yt-dlp/yt-dlp/issues/14052)) by [bashonly](https://github.com/bashonly), [doe1080](https://github.com/doe1080), [Randalix](https://github.com/Randalix), [seproDev](https://github.com/seproDev)
|
||||||
|
- **niconico**: live: [Support age-restricted streams](https://github.com/yt-dlp/yt-dlp/commit/374ea049f531959bcccf8a1e6bc5659d228a780e) ([#13549](https://github.com/yt-dlp/yt-dlp/issues/13549)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- **nrktvepisode**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/7540aa1da1800769af40381f423825a1a8826377) ([#14065](https://github.com/yt-dlp/yt-dlp/issues/14065)) by [runarmod](https://github.com/runarmod)
|
||||||
|
- **puhutv**: [Fix playlists extraction](https://github.com/yt-dlp/yt-dlp/commit/36e873822bdb2c5aba3780dd3ae32cbae564c6cd) ([#11955](https://github.com/yt-dlp/yt-dlp/issues/11955)) by [e2dk4r](https://github.com/e2dk4r)
|
||||||
|
- **steam**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/d3d1ac8eb2f9e96f3d75292e0effe2b1bccece3b) ([#14008](https://github.com/yt-dlp/yt-dlp/issues/14008)) by [AzartX47](https://github.com/AzartX47)
|
||||||
|
- **svt**: [Extract forced subs under separate lang code](https://github.com/yt-dlp/yt-dlp/commit/82a139020417a501f261d9fe02cefca01b1e12e4) ([#14062](https://github.com/yt-dlp/yt-dlp/issues/14062)) by [PierreMesure](https://github.com/PierreMesure)
|
||||||
|
- **tiktok**: user: [Avoid infinite loop during extraction](https://github.com/yt-dlp/yt-dlp/commit/edf55e81842fcfa6c302528d7f33ccd5081b37ef) ([#14032](https://github.com/yt-dlp/yt-dlp/issues/14032)) by [bashonly](https://github.com/bashonly) (With fixes in [471a2b6](https://github.com/yt-dlp/yt-dlp/commit/471a2b60e0a3e056960d9ceb1ebf57908428f752))
|
||||||
|
- **vimeo**
|
||||||
|
- album: [Support embed-only and non-numeric albums](https://github.com/yt-dlp/yt-dlp/commit/d8200ff0a4699e06c9f7daca8f8531f8b98e68f2) ([#14021](https://github.com/yt-dlp/yt-dlp/issues/14021)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- event: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0f6b915822fb64bd944126fdacd401975c9f06ed) ([#14064](https://github.com/yt-dlp/yt-dlp/issues/14064)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **weibo**
|
||||||
|
- [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/8e3f8065af1415caeff788c5c430703dd0d8f576) ([#14012](https://github.com/yt-dlp/yt-dlp/issues/14012)) by [AzartX47](https://github.com/AzartX47), [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support more URLs and --no-playlist](https://github.com/yt-dlp/yt-dlp/commit/404bd889d0e0b62ad72b7281e3fefdc0497080b3) ([#14035](https://github.com/yt-dlp/yt-dlp/issues/14035)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **youtube**
|
||||||
|
- [Add `es5` and `es6` player JS variants](https://github.com/yt-dlp/yt-dlp/commit/f2919bd28eac905f1267c62b83738a02bb5b4e04) ([#14005](https://github.com/yt-dlp/yt-dlp/issues/14005)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Add `playback_wait` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/f63a7e41d120ef84f0f2274b0962438e3272d2fa) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Default to `main` player JS variant](https://github.com/yt-dlp/yt-dlp/commit/df0553153e41f81e3b30aa5bb1d119c61bd449ac) ([#14079](https://github.com/yt-dlp/yt-dlp/issues/14079)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Extract title and description from initial data](https://github.com/yt-dlp/yt-dlp/commit/7bc53ae79930b36f4f947679545c75f36e9f0ddd) ([#14078](https://github.com/yt-dlp/yt-dlp/issues/14078)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Handle required preroll waiting period](https://github.com/yt-dlp/yt-dlp/commit/a97f4cb57e61e19be61a7d5ac19665d4b567c960) ([#14081](https://github.com/yt-dlp/yt-dlp/issues/14081)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Remove default player params](https://github.com/yt-dlp/yt-dlp/commit/d154dc3dcf0c7c75dbabb6cd1aca66fdd806f858) ([#14081](https://github.com/yt-dlp/yt-dlp/issues/14081)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- tab: [Fix playlists tab extraction](https://github.com/yt-dlp/yt-dlp/commit/8a8861d53864c8a38e924bc0657ead5180f17268) ([#14030](https://github.com/yt-dlp/yt-dlp/issues/14030)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
#### Downloader changes
|
||||||
|
- [Support `available_at` format field](https://github.com/yt-dlp/yt-dlp/commit/438d3f06b3c41bdef8112d40b75d342186e91a16) ([#13980](https://github.com/yt-dlp/yt-dlp/issues/13980)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
#### Postprocessor changes
|
||||||
|
- **xattrmetadata**: [Only set "Where From" attribute on macOS](https://github.com/yt-dlp/yt-dlp/commit/bdeb3eb3f29eebbe8237fbc5186e51e7293eea4a) ([#13999](https://github.com/yt-dlp/yt-dlp/issues/13999)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **build**
|
||||||
|
- [Add Windows ARM64 builds](https://github.com/yt-dlp/yt-dlp/commit/07247d6c20fef1ad13b6f71f6355a44d308cf010) ([#14003](https://github.com/yt-dlp/yt-dlp/issues/14003)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Bump PyInstaller version to 6.15.0 for Windows](https://github.com/yt-dlp/yt-dlp/commit/681ed2153de754c2c885fdad09ab71fffa8114f9) ([#14002](https://github.com/yt-dlp/yt-dlp/issues/14002)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Discontinue `darwin_legacy_exe` support](https://github.com/yt-dlp/yt-dlp/commit/aea85d525e1007bb64baec0e170c054292d0858a) ([#13860](https://github.com/yt-dlp/yt-dlp/issues/13860)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **cleanup**
|
||||||
|
- [Remove dead extractors](https://github.com/yt-dlp/yt-dlp/commit/6f4c1bb593da92f0ce68229d0c813cdbaf1314da) ([#13996](https://github.com/yt-dlp/yt-dlp/issues/13996)) by [doe1080](https://github.com/doe1080)
|
||||||
|
- Miscellaneous: [c2fc4f3](https://github.com/yt-dlp/yt-dlp/commit/c2fc4f3e7f6d757250183b177130c64beee50520) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
### 2025.08.11
|
### 2025.08.11
|
||||||
|
|
||||||
#### Important changes
|
#### Important changes
|
||||||
- **The minimum *recommended* Python version has been raised to 3.10**
|
- **The minimum *recommended* Python version has been raised to 3.10**
|
||||||
Since Python 3.9 will reach end-of-life in October 2025, support for it will be dropped soon. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13858)
|
Since Python 3.9 will reach end-of-life in October 2025, support for it will be dropped soon. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13858)
|
||||||
- **darwin_legacy_exe builds are being discontinued**
|
- **darwin_legacy_exe builds are being discontinued**
|
||||||
This release's `yt-dlp_macos_legacy` binary will likely be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13857)
|
This release's `yt-dlp_macos_legacy` binary will likely be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13856)
|
||||||
- **linux_armv7l_exe builds are being discontinued**
|
- **linux_armv7l_exe builds are being discontinued**
|
||||||
This release's `yt-dlp_linux_armv7l` binary could be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13976)
|
This release's `yt-dlp_linux_armv7l` binary could be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13976)
|
||||||
|
|
||||||
|
|||||||
4
Makefile
4
Makefile
@@ -10,7 +10,7 @@ tar: yt-dlp.tar.gz
|
|||||||
# intended use: when building a source distribution,
|
# intended use: when building a source distribution,
|
||||||
# make pypi-files && python3 -m build -sn .
|
# make pypi-files && python3 -m build -sn .
|
||||||
pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
|
pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
|
||||||
completions yt-dlp.1 pyproject.toml setup.cfg devscripts/* test/*
|
completions yt-dlp.1 pyproject.toml devscripts/* test/*
|
||||||
|
|
||||||
.PHONY: all clean clean-all clean-test clean-dist clean-cache \
|
.PHONY: all clean clean-all clean-test clean-dist clean-cache \
|
||||||
completions completion-bash completion-fish completion-zsh \
|
completions completion-bash completion-fish completion-zsh \
|
||||||
@@ -159,7 +159,7 @@ yt-dlp.tar.gz: all
|
|||||||
README.md supportedsites.md Changelog.md LICENSE \
|
README.md supportedsites.md Changelog.md LICENSE \
|
||||||
CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
|
CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
|
||||||
Makefile yt-dlp.1 README.txt completions .gitignore \
|
Makefile yt-dlp.1 README.txt completions .gitignore \
|
||||||
setup.cfg yt-dlp yt_dlp pyproject.toml devscripts test
|
yt-dlp yt_dlp pyproject.toml devscripts test
|
||||||
|
|
||||||
AUTHORS: Changelog.md
|
AUTHORS: Changelog.md
|
||||||
@if [ -d '.git' ] && command -v git > /dev/null ; then \
|
@if [ -d '.git' ] && command -v git > /dev/null ; then \
|
||||||
|
|||||||
73
README.md
73
README.md
@@ -105,13 +105,21 @@ File|Description
|
|||||||
|
|
||||||
File|Description
|
File|Description
|
||||||
:---|:---
|
:---|:---
|
||||||
|
[yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux (glibc 2.17+) standalone x86_64 binary
|
||||||
|
[yt-dlp_linux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux.zip)|Unpackaged Linux (glibc 2.17+) x86_64 executable (no auto-update)
|
||||||
|
[yt-dlp_linux_aarch64](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_aarch64)|Linux (glibc 2.17+) standalone aarch64 binary
|
||||||
|
[yt-dlp_linux_aarch64.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_aarch64.zip)|Unpackaged Linux (glibc 2.17+) aarch64 executable (no auto-update)
|
||||||
|
[yt-dlp_linux_armv7l.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_armv7l.zip)|Unpackaged Linux (glibc 2.31+) armv7l executable (no auto-update)
|
||||||
|
[yt-dlp_musllinux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_musllinux)|Linux (musl 1.2+) standalone x86_64 binary
|
||||||
|
[yt-dlp_musllinux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_musllinux.zip)|Unpackaged Linux (musl 1.2+) x86_64 executable (no auto-update)
|
||||||
|
[yt-dlp_musllinux_aarch64](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_musllinux_aarch64)|Linux (musl 1.2+) standalone aarch64 binary
|
||||||
|
[yt-dlp_musllinux_aarch64.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_musllinux_aarch64.zip)|Unpackaged Linux (musl 1.2+) aarch64 executable (no auto-update)
|
||||||
[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Win8+) standalone x86 (32-bit) binary
|
[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Win8+) standalone x86 (32-bit) binary
|
||||||
[yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux standalone x64 binary
|
[yt-dlp_win_x86.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win_x86.zip)|Unpackaged Windows (Win8+) x86 (32-bit) executable (no auto-update)
|
||||||
[yt-dlp_linux_armv7l](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_armv7l)|Linux standalone armv7l (32-bit) binary
|
[yt-dlp_arm64.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_arm64.exe)|Windows (Win10+) standalone ARM64 binary
|
||||||
[yt-dlp_linux_aarch64](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_aarch64)|Linux standalone aarch64 (64-bit) binary
|
[yt-dlp_win_arm64.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win_arm64.zip)|Unpackaged Windows (Win10+) ARM64 executable (no auto-update)
|
||||||
[yt-dlp_win.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win.zip)|Unpackaged Windows executable (no auto-update)
|
[yt-dlp_win.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win.zip)|Unpackaged Windows (Win8+) x64 executable (no auto-update)
|
||||||
[yt-dlp_macos.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_macos.zip)|Unpackaged MacOS (10.15+) executable (no auto-update)
|
[yt-dlp_macos.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_macos.zip)|Unpackaged MacOS (10.15+) executable (no auto-update)
|
||||||
[yt-dlp_macos_legacy](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_macos_legacy)|MacOS (10.9+) standalone x64 executable
|
|
||||||
|
|
||||||
#### Misc
|
#### Misc
|
||||||
|
|
||||||
@@ -130,6 +138,17 @@ curl -L https://github.com/yt-dlp/yt-dlp/raw/master/public.key | gpg --import
|
|||||||
gpg --verify SHA2-256SUMS.sig SHA2-256SUMS
|
gpg --verify SHA2-256SUMS.sig SHA2-256SUMS
|
||||||
gpg --verify SHA2-512SUMS.sig SHA2-512SUMS
|
gpg --verify SHA2-512SUMS.sig SHA2-512SUMS
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Licensing
|
||||||
|
|
||||||
|
While yt-dlp is licensed under the [Unlicense](LICENSE), many of the release files contain code from other projects with different licenses.
|
||||||
|
|
||||||
|
Most notably, the PyInstaller-bundled executables include GPLv3+ licensed code, and as such the combined work is licensed under [GPLv3+](https://www.gnu.org/licenses/gpl-3.0.html).
|
||||||
|
|
||||||
|
See [THIRD_PARTY_LICENSES.txt](THIRD_PARTY_LICENSES.txt) for details.
|
||||||
|
|
||||||
|
The zipimport binary (`yt-dlp`), the source tarball (`yt-dlp.tar.gz`), and the PyPI source distribution & wheel only contain code licensed under the [Unlicense](LICENSE).
|
||||||
|
|
||||||
<!-- MANPAGE: END EXCLUDED SECTION -->
|
<!-- MANPAGE: END EXCLUDED SECTION -->
|
||||||
|
|
||||||
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
|
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
|
||||||
@@ -204,7 +223,7 @@ The following provide support for impersonating browser requests. This may be re
|
|||||||
|
|
||||||
* [**curl_cffi**](https://github.com/lexiforest/curl_cffi) (recommended) - Python binding for [curl-impersonate](https://github.com/lexiforest/curl-impersonate). Provides impersonation targets for Chrome, Edge and Safari. Licensed under [MIT](https://github.com/lexiforest/curl_cffi/blob/main/LICENSE)
|
* [**curl_cffi**](https://github.com/lexiforest/curl_cffi) (recommended) - Python binding for [curl-impersonate](https://github.com/lexiforest/curl-impersonate). Provides impersonation targets for Chrome, Edge and Safari. Licensed under [MIT](https://github.com/lexiforest/curl_cffi/blob/main/LICENSE)
|
||||||
* Can be installed with the `curl-cffi` group, e.g. `pip install "yt-dlp[default,curl-cffi]"`
|
* Can be installed with the `curl-cffi` group, e.g. `pip install "yt-dlp[default,curl-cffi]"`
|
||||||
* Currently included in `yt-dlp.exe`, `yt-dlp_linux` and `yt-dlp_macos` builds
|
* Currently included in most builds *except* `yt-dlp` (Unix zipimport binary), `yt-dlp_x86` (Windows 32-bit) and `yt-dlp_musllinux_aarch64`
|
||||||
|
|
||||||
|
|
||||||
### Metadata
|
### Metadata
|
||||||
@@ -222,8 +241,6 @@ The following provide support for impersonating browser requests. This may be re
|
|||||||
|
|
||||||
### Deprecated
|
### Deprecated
|
||||||
|
|
||||||
* [**avconv** and **avprobe**](https://www.libav.org) - Now **deprecated** alternative to ffmpeg. License [depends on the build](https://libav.org/legal)
|
|
||||||
* [**sponskrub**](https://github.com/faissaloo/SponSkrub) - For using the now **deprecated** [sponskrub options](#sponskrub-options). Licensed under [GPLv3+](https://github.com/faissaloo/SponSkrub/blob/master/LICENCE.md)
|
|
||||||
* [**rtmpdump**](http://rtmpdump.mplayerhq.hu) - For downloading `rtmp` streams. ffmpeg can be used instead with `--downloader ffmpeg`. Licensed under [GPLv2+](http://rtmpdump.mplayerhq.hu)
|
* [**rtmpdump**](http://rtmpdump.mplayerhq.hu) - For downloading `rtmp` streams. ffmpeg can be used instead with `--downloader ffmpeg`. Licensed under [GPLv2+](http://rtmpdump.mplayerhq.hu)
|
||||||
* [**mplayer**](http://mplayerhq.hu/design7/info.html) or [**mpv**](https://mpv.io) - For downloading `rstp`/`mms` streams. ffmpeg can be used instead with `--downloader ffmpeg`. Licensed under [GPLv2+](https://github.com/mpv-player/mpv/blob/master/Copyright)
|
* [**mplayer**](http://mplayerhq.hu/design7/info.html) or [**mpv**](https://mpv.io) - For downloading `rstp`/`mms` streams. ffmpeg can be used instead with `--downloader ffmpeg`. Licensed under [GPLv2+](https://github.com/mpv-player/mpv/blob/master/Copyright)
|
||||||
|
|
||||||
@@ -303,7 +320,6 @@ Tip: Use `CTRL`+`F` (or `Command`+`F`) to search by keywords
|
|||||||
playlist (default)
|
playlist (default)
|
||||||
--abort-on-error Abort downloading of further videos if an
|
--abort-on-error Abort downloading of further videos if an
|
||||||
error occurs (Alias: --no-ignore-errors)
|
error occurs (Alias: --no-ignore-errors)
|
||||||
--dump-user-agent Display the current user-agent and exit
|
|
||||||
--list-extractors List all supported extractors and exit
|
--list-extractors List all supported extractors and exit
|
||||||
--extractor-descriptions Output descriptions of all supported
|
--extractor-descriptions Output descriptions of all supported
|
||||||
extractors and exit
|
extractors and exit
|
||||||
@@ -554,8 +570,6 @@ Tip: Use `CTRL`+`F` (or `Command`+`F`) to search by keywords
|
|||||||
--playlist-random and --playlist-reverse
|
--playlist-random and --playlist-reverse
|
||||||
--no-lazy-playlist Process videos in the playlist only after
|
--no-lazy-playlist Process videos in the playlist only after
|
||||||
the entire playlist is parsed (default)
|
the entire playlist is parsed (default)
|
||||||
--xattr-set-filesize Set file xattribute ytdl.filesize with
|
|
||||||
expected file size
|
|
||||||
--hls-use-mpegts Use the mpegts container for HLS videos;
|
--hls-use-mpegts Use the mpegts container for HLS videos;
|
||||||
allowing some players to play the video
|
allowing some players to play the video
|
||||||
while downloading, and reducing the chance
|
while downloading, and reducing the chance
|
||||||
@@ -579,9 +593,9 @@ Tip: Use `CTRL`+`F` (or `Command`+`F`) to search by keywords
|
|||||||
use (optionally) prefixed by the protocols
|
use (optionally) prefixed by the protocols
|
||||||
(http, ftp, m3u8, dash, rstp, rtmp, mms) to
|
(http, ftp, m3u8, dash, rstp, rtmp, mms) to
|
||||||
use it for. Currently supports native,
|
use it for. Currently supports native,
|
||||||
aria2c, avconv, axel, curl, ffmpeg, httpie,
|
aria2c, axel, curl, ffmpeg, httpie, wget.
|
||||||
wget. You can use this option multiple times
|
You can use this option multiple times to
|
||||||
to set different downloaders for different
|
set different downloaders for different
|
||||||
protocols. E.g. --downloader aria2c
|
protocols. E.g. --downloader aria2c
|
||||||
--downloader "dash,m3u8:native" will use
|
--downloader "dash,m3u8:native" will use
|
||||||
aria2c for http/ftp downloads, and the
|
aria2c for http/ftp downloads, and the
|
||||||
@@ -1800,11 +1814,12 @@ The following extractors use this feature:
|
|||||||
#### youtube
|
#### youtube
|
||||||
* `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube/_base.py](https://github.com/yt-dlp/yt-dlp/blob/415b4c9f955b1a0391204bd24a7132590e7b3bdb/yt_dlp/extractor/youtube/_base.py#L402-L409) for the list of supported content language codes
|
* `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube/_base.py](https://github.com/yt-dlp/yt-dlp/blob/415b4c9f955b1a0391204bd24a7132590e7b3bdb/yt_dlp/extractor/youtube/_base.py#L402-L409) for the list of supported content language codes
|
||||||
* `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
|
* `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
|
||||||
* `player_client`: Clients to extract video data from. The currently available clients are `web`, `web_safari`, `web_embedded`, `web_music`, `web_creator`, `mweb`, `ios`, `android`, `android_vr`, `tv`, `tv_simply` and `tv_embedded`. By default, `tv,ios,web` is used, or `tv,web` is used when authenticating with cookies. The `web_music` client is added for `music.youtube.com` URLs when logged-in cookies are used. The `web_embedded` client is added for age-restricted videos but only works if the video is embeddable. The `tv_embedded` and `web_creator` clients are added for age-restricted videos if account age-verification is required. Some clients, such as `web` and `web_music`, require a `po_token` for their formats to be downloadable. Some clients, such as `web_creator`, will only work with authentication. Not all clients support authentication via cookies. You can use `default` for the default clients, or you can use `all` for all clients (not recommended). You can prefix a client with `-` to exclude it, e.g. `youtube:player_client=default,-ios`
|
* `player_client`: Clients to extract video data from. The currently available clients are `web`, `web_safari`, `web_embedded`, `web_music`, `web_creator`, `mweb`, `ios`, `android`, `android_vr`, `tv`, `tv_simply` and `tv_embedded`. By default, `tv,web_safari,web` is used, and `tv,web_creator,web` is used with premium accounts. The `web_music` client is added for `music.youtube.com` URLs when logged-in cookies are used. The `web_embedded` client is added for age-restricted videos but only works if the video is embeddable. The `tv_embedded` and `web_creator` clients are added for age-restricted videos if account age-verification is required. Some clients, such as `web` and `web_music`, require a `po_token` for their formats to be downloadable. Some clients, such as `web_creator`, will only work with authentication. Not all clients support authentication via cookies. You can use `default` for the default clients, or you can use `all` for all clients (not recommended). You can prefix a client with `-` to exclude it, e.g. `youtube:player_client=default,-ios`
|
||||||
* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player), `initial_data` (skip initial data/next ep request). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause issues such as missing formats or metadata. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) and [#12826](https://github.com/yt-dlp/yt-dlp/issues/12826) for more details
|
* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player), `initial_data` (skip initial data/next ep request). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause issues such as missing formats or metadata. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) and [#12826](https://github.com/yt-dlp/yt-dlp/issues/12826) for more details
|
||||||
* `webpage_skip`: Skip extraction of embedded webpage data. One or both of `player_response`, `initial_data`. These options are for testing purposes and don't skip any network requests
|
* `webpage_skip`: Skip extraction of embedded webpage data. One or both of `player_response`, `initial_data`. These options are for testing purposes and don't skip any network requests
|
||||||
* `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp.
|
* `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp.
|
||||||
* `player_js_variant`: The player javascript variant to use for signature and nsig deciphering. The known variants are: `main`, `tce`, `tv`, `tv_es6`, `phone`, `tablet`. Only `main` is recommended as a possible workaround; the others are for debugging purposes. The default is to use what is prescribed by the site, and can be selected with `actual`
|
* `player_js_variant`: The player javascript variant to use for n/sig deciphering. The known variants are: `main`, `tcc`, `tce`, `es5`, `es6`, `tv`, `tv_es6`, `phone`, `tablet`. The default is `main`, and the others are for debugging purposes. You can use `actual` to go with what is prescribed by the site
|
||||||
|
* `player_js_version`: The player javascript version to use for n/sig deciphering, in the format of `signature_timestamp@hash`. Currently, the default is to force `20348@0004de42`. You can use `actual` to go with what is prescribed by the site
|
||||||
* `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
|
* `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
|
||||||
* `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`
|
* `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`
|
||||||
* E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total
|
* E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total
|
||||||
@@ -1817,6 +1832,7 @@ The following extractors use this feature:
|
|||||||
* `po_token`: Proof of Origin (PO) Token(s) to use. Comma seperated list of PO Tokens in the format `CLIENT.CONTEXT+PO_TOKEN`, e.g. `youtube:po_token=web.gvs+XXX,web.player=XXX,web_safari.gvs+YYY`. Context can be any of `gvs` (Google Video Server URLs), `player` (Innertube player request) or `subs` (Subtitles)
|
* `po_token`: Proof of Origin (PO) Token(s) to use. Comma seperated list of PO Tokens in the format `CLIENT.CONTEXT+PO_TOKEN`, e.g. `youtube:po_token=web.gvs+XXX,web.player=XXX,web_safari.gvs+YYY`. Context can be any of `gvs` (Google Video Server URLs), `player` (Innertube player request) or `subs` (Subtitles)
|
||||||
* `pot_trace`: Enable debug logging for PO Token fetching. Either `true` or `false` (default)
|
* `pot_trace`: Enable debug logging for PO Token fetching. Either `true` or `false` (default)
|
||||||
* `fetch_pot`: Policy to use for fetching a PO Token from providers. One of `always` (always try fetch a PO Token regardless if the client requires one for the given context), `never` (never fetch a PO Token), or `auto` (default; only fetch a PO Token if the client requires one for the given context)
|
* `fetch_pot`: Policy to use for fetching a PO Token from providers. One of `always` (always try fetch a PO Token regardless if the client requires one for the given context), `never` (never fetch a PO Token), or `auto` (default; only fetch a PO Token if the client requires one for the given context)
|
||||||
|
* `playback_wait`: Duration (in seconds) to wait inbetween the extraction and download stages in order to ensure the formats are available. The default is `6` seconds
|
||||||
|
|
||||||
#### youtubepot-webpo
|
#### youtubepot-webpo
|
||||||
* `bind_to_visitor_id`: Whether to use the Visitor ID instead of Visitor Data for caching WebPO tokens. Either `true` (default) or `false`
|
* `bind_to_visitor_id`: Whether to use the Visitor ID instead of Visitor Data for caching WebPO tokens. Either `true` (default) or `false`
|
||||||
@@ -2198,7 +2214,6 @@ with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
|||||||
* Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
|
* Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
|
||||||
* Download livestreams from the start using `--live-from-start` (*experimental*)
|
* Download livestreams from the start using `--live-from-start` (*experimental*)
|
||||||
* Channel URLs download all uploads of the channel, including shorts and live
|
* Channel URLs download all uploads of the channel, including shorts and live
|
||||||
* Support for [logging in with OAuth](https://github.com/yt-dlp/yt-dlp/wiki/Extractors#logging-in-with-oauth)
|
|
||||||
|
|
||||||
* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
|
* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
|
||||||
|
|
||||||
@@ -2342,11 +2357,7 @@ While these options still work, their use is not recommended since there are oth
|
|||||||
--hls-prefer-native --downloader "m3u8:native"
|
--hls-prefer-native --downloader "m3u8:native"
|
||||||
--hls-prefer-ffmpeg --downloader "m3u8:ffmpeg"
|
--hls-prefer-ffmpeg --downloader "m3u8:ffmpeg"
|
||||||
--list-formats-old --compat-options list-formats (Alias: --no-list-formats-as-table)
|
--list-formats-old --compat-options list-formats (Alias: --no-list-formats-as-table)
|
||||||
--list-formats-as-table --compat-options -list-formats [Default] (Alias: --no-list-formats-old)
|
--list-formats-as-table --compat-options -list-formats [Default]
|
||||||
--youtube-skip-dash-manifest --extractor-args "youtube:skip=dash" (Alias: --no-youtube-include-dash-manifest)
|
|
||||||
--youtube-skip-hls-manifest --extractor-args "youtube:skip=hls" (Alias: --no-youtube-include-hls-manifest)
|
|
||||||
--youtube-include-dash-manifest Default (Alias: --no-youtube-skip-dash-manifest)
|
|
||||||
--youtube-include-hls-manifest Default (Alias: --no-youtube-skip-hls-manifest)
|
|
||||||
--geo-bypass --xff "default"
|
--geo-bypass --xff "default"
|
||||||
--no-geo-bypass --xff "never"
|
--no-geo-bypass --xff "never"
|
||||||
--geo-bypass-country CODE --xff CODE
|
--geo-bypass-country CODE --xff CODE
|
||||||
@@ -2357,18 +2368,13 @@ These options are not intended to be used by the end-user
|
|||||||
|
|
||||||
--test Download only part of video for testing extractors
|
--test Download only part of video for testing extractors
|
||||||
--load-pages Load pages dumped by --write-pages
|
--load-pages Load pages dumped by --write-pages
|
||||||
--youtube-print-sig-code For testing youtube signatures
|
|
||||||
--allow-unplayable-formats List unplayable formats also
|
--allow-unplayable-formats List unplayable formats also
|
||||||
--no-allow-unplayable-formats Default
|
--no-allow-unplayable-formats Default
|
||||||
|
|
||||||
#### Old aliases
|
#### Old aliases
|
||||||
These are aliases that are no longer documented for various reasons
|
These are aliases that are no longer documented for various reasons
|
||||||
|
|
||||||
--avconv-location --ffmpeg-location
|
|
||||||
--clean-infojson --clean-info-json
|
--clean-infojson --clean-info-json
|
||||||
--cn-verification-proxy URL --geo-verification-proxy URL
|
|
||||||
--dump-headers --print-traffic
|
|
||||||
--dump-intermediate-pages --dump-pages
|
|
||||||
--force-write-download-archive --force-write-archive
|
--force-write-download-archive --force-write-archive
|
||||||
--no-clean-infojson --no-clean-info-json
|
--no-clean-infojson --no-clean-info-json
|
||||||
--no-split-tracks --no-split-chapters
|
--no-split-tracks --no-split-chapters
|
||||||
@@ -2382,7 +2388,7 @@ These are aliases that are no longer documented for various reasons
|
|||||||
--yes-overwrites --force-overwrites
|
--yes-overwrites --force-overwrites
|
||||||
|
|
||||||
#### Sponskrub Options
|
#### Sponskrub Options
|
||||||
Support for [SponSkrub](https://github.com/faissaloo/SponSkrub) has been deprecated in favor of the `--sponsorblock` options
|
Support for [SponSkrub](https://github.com/faissaloo/SponSkrub) has been removed in favor of the `--sponsorblock` options
|
||||||
|
|
||||||
--sponskrub --sponsorblock-mark all
|
--sponskrub --sponsorblock-mark all
|
||||||
--no-sponskrub --no-sponsorblock
|
--no-sponskrub --no-sponsorblock
|
||||||
@@ -2404,6 +2410,17 @@ These options may no longer work as intended
|
|||||||
--no-include-ads Default
|
--no-include-ads Default
|
||||||
--write-annotations No supported site has annotations now
|
--write-annotations No supported site has annotations now
|
||||||
--no-write-annotations Default
|
--no-write-annotations Default
|
||||||
|
--avconv-location Removed alias for --ffmpeg-location
|
||||||
|
--cn-verification-proxy URL Removed alias for --geo-verification-proxy URL
|
||||||
|
--dump-headers Removed alias for --print-traffic
|
||||||
|
--dump-intermediate-pages Removed alias for --dump-pages
|
||||||
|
--youtube-skip-dash-manifest Removed alias for --extractor-args "youtube:skip=dash" (Alias: --no-youtube-include-dash-manifest)
|
||||||
|
--youtube-skip-hls-manifest Removed alias for --extractor-args "youtube:skip=hls" (Alias: --no-youtube-include-hls-manifest)
|
||||||
|
--youtube-include-dash-manifest Default (Alias: --no-youtube-skip-dash-manifest)
|
||||||
|
--youtube-include-hls-manifest Default (Alias: --no-youtube-skip-hls-manifest)
|
||||||
|
--youtube-print-sig-code Removed testing functionality
|
||||||
|
--dump-user-agent No longer supported
|
||||||
|
--xattr-set-filesize No longer supported
|
||||||
--compat-options seperate-video-versions No longer needed
|
--compat-options seperate-video-versions No longer needed
|
||||||
--compat-options no-youtube-prefer-utc-upload-date No longer supported
|
--compat-options no-youtube-prefer-utc-upload-date No longer supported
|
||||||
|
|
||||||
|
|||||||
4433
THIRD_PARTY_LICENSES.txt
Normal file
4433
THIRD_PARTY_LICENSES.txt
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,10 +1,178 @@
|
|||||||
services:
|
services:
|
||||||
static:
|
|
||||||
build: static
|
linux_x86_64:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: build
|
||||||
|
platforms:
|
||||||
|
- "linux/amd64"
|
||||||
|
args:
|
||||||
|
BUILDIMAGE: ghcr.io/yt-dlp/manylinux2014_x86_64-shared:latest
|
||||||
environment:
|
environment:
|
||||||
channel: ${channel}
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
origin: ${origin}
|
CHANNEL: ${CHANNEL:?}
|
||||||
version: ${version}
|
ORIGIN: ${ORIGIN:?}
|
||||||
|
VERSION:
|
||||||
|
PYTHON_VERSION:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
volumes:
|
volumes:
|
||||||
- ~/build:/build
|
|
||||||
- ../..:/yt-dlp
|
- ../..:/yt-dlp
|
||||||
|
|
||||||
|
linux_x86_64_verify:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: verify
|
||||||
|
platforms:
|
||||||
|
- "linux/amd64"
|
||||||
|
args:
|
||||||
|
VERIFYIMAGE: quay.io/pypa/manylinux2014_x86_64:latest
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
UPDATE_TO:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../../dist:/build
|
||||||
|
|
||||||
|
linux_aarch64:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: build
|
||||||
|
platforms:
|
||||||
|
- "linux/arm64"
|
||||||
|
args:
|
||||||
|
BUILDIMAGE: ghcr.io/yt-dlp/manylinux2014_aarch64-shared:latest
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
CHANNEL: ${CHANNEL:?}
|
||||||
|
ORIGIN: ${ORIGIN:?}
|
||||||
|
VERSION:
|
||||||
|
PYTHON_VERSION:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../..:/yt-dlp
|
||||||
|
|
||||||
|
linux_aarch64_verify:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: verify
|
||||||
|
platforms:
|
||||||
|
- "linux/arm64"
|
||||||
|
args:
|
||||||
|
VERIFYIMAGE: quay.io/pypa/manylinux2014_aarch64:latest
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
UPDATE_TO:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../../dist:/build
|
||||||
|
|
||||||
|
linux_armv7l:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: build
|
||||||
|
platforms:
|
||||||
|
- "linux/arm/v7"
|
||||||
|
args:
|
||||||
|
BUILDIMAGE: ghcr.io/yt-dlp/manylinux_2_31_armv7l-shared:latest
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
CHANNEL: ${CHANNEL:?}
|
||||||
|
ORIGIN: ${ORIGIN:?}
|
||||||
|
VERSION:
|
||||||
|
PYTHON_VERSION:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../..:/yt-dlp
|
||||||
|
- ../../venv:/yt-dlp-build-venv
|
||||||
|
|
||||||
|
linux_armv7l_verify:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: verify
|
||||||
|
platforms:
|
||||||
|
- "linux/arm/v7"
|
||||||
|
args:
|
||||||
|
VERIFYIMAGE: arm32v7/debian:bullseye
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
UPDATE_TO:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../../dist:/build
|
||||||
|
|
||||||
|
musllinux_x86_64:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: build
|
||||||
|
platforms:
|
||||||
|
- "linux/amd64"
|
||||||
|
args:
|
||||||
|
BUILDIMAGE: ghcr.io/yt-dlp/musllinux_1_2_x86_64-shared:latest
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
CHANNEL: ${CHANNEL:?}
|
||||||
|
ORIGIN: ${ORIGIN:?}
|
||||||
|
VERSION:
|
||||||
|
PYTHON_VERSION:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../..:/yt-dlp
|
||||||
|
|
||||||
|
musllinux_x86_64_verify:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: verify
|
||||||
|
platforms:
|
||||||
|
- "linux/amd64"
|
||||||
|
args:
|
||||||
|
VERIFYIMAGE: alpine:3.22
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
UPDATE_TO:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../../dist:/build
|
||||||
|
|
||||||
|
musllinux_aarch64:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: build
|
||||||
|
platforms:
|
||||||
|
- "linux/arm64"
|
||||||
|
args:
|
||||||
|
BUILDIMAGE: ghcr.io/yt-dlp/musllinux_1_2_aarch64-shared:latest
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
CHANNEL: ${CHANNEL:?}
|
||||||
|
ORIGIN: ${ORIGIN:?}
|
||||||
|
VERSION:
|
||||||
|
PYTHON_VERSION:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
EXCLUDE_CURL_CFFI: "1"
|
||||||
|
volumes:
|
||||||
|
- ../..:/yt-dlp
|
||||||
|
|
||||||
|
musllinux_aarch64_verify:
|
||||||
|
build:
|
||||||
|
context: linux
|
||||||
|
target: verify
|
||||||
|
platforms:
|
||||||
|
- "linux/arm64"
|
||||||
|
args:
|
||||||
|
VERIFYIMAGE: alpine:3.22
|
||||||
|
environment:
|
||||||
|
EXE_NAME: ${EXE_NAME:?}
|
||||||
|
UPDATE_TO:
|
||||||
|
SKIP_ONEDIR_BUILD:
|
||||||
|
SKIP_ONEFILE_BUILD:
|
||||||
|
volumes:
|
||||||
|
- ../../dist:/build
|
||||||
|
|||||||
16
bundle/docker/linux/Dockerfile
Normal file
16
bundle/docker/linux/Dockerfile
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
ARG BUILDIMAGE=ghcr.io/yt-dlp/manylinux2014_x86_64-shared:latest
|
||||||
|
ARG VERIFYIMAGE=alpine:3.22
|
||||||
|
|
||||||
|
|
||||||
|
FROM $BUILDIMAGE AS build
|
||||||
|
|
||||||
|
WORKDIR /yt-dlp
|
||||||
|
COPY build.sh /build.sh
|
||||||
|
ENTRYPOINT ["/build.sh"]
|
||||||
|
|
||||||
|
|
||||||
|
FROM $VERIFYIMAGE AS verify
|
||||||
|
|
||||||
|
WORKDIR /testing
|
||||||
|
COPY verify.sh /verify.sh
|
||||||
|
ENTRYPOINT ["/verify.sh"]
|
||||||
48
bundle/docker/linux/build.sh
Executable file
48
bundle/docker/linux/build.sh
Executable file
@@ -0,0 +1,48 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -exuo pipefail
|
||||||
|
|
||||||
|
if [[ -z "${PYTHON_VERSION:-}" ]]; then
|
||||||
|
PYTHON_VERSION="3.13"
|
||||||
|
echo "Defaulting to using Python ${PYTHON_VERSION}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
function runpy {
|
||||||
|
"/opt/shared-cpython-${PYTHON_VERSION}/bin/python${PYTHON_VERSION}" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
function venvpy {
|
||||||
|
"python${PYTHON_VERSION}" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
INCLUDES=(
|
||||||
|
--include pyinstaller
|
||||||
|
--include secretstorage
|
||||||
|
)
|
||||||
|
|
||||||
|
if [[ -z "${EXCLUDE_CURL_CFFI:-}" ]]; then
|
||||||
|
INCLUDES+=(--include curl-cffi)
|
||||||
|
fi
|
||||||
|
|
||||||
|
runpy -m venv /yt-dlp-build-venv
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
source /yt-dlp-build-venv/bin/activate
|
||||||
|
# Inside the venv we use venvpy instead of runpy
|
||||||
|
venvpy -m ensurepip --upgrade --default-pip
|
||||||
|
venvpy -m devscripts.install_deps -o --include build
|
||||||
|
venvpy -m devscripts.install_deps "${INCLUDES[@]}"
|
||||||
|
venvpy -m devscripts.make_lazy_extractors
|
||||||
|
venvpy devscripts/update-version.py -c "${CHANNEL}" -r "${ORIGIN}" "${VERSION}"
|
||||||
|
|
||||||
|
if [[ -z "${SKIP_ONEDIR_BUILD:-}" ]]; then
|
||||||
|
mkdir -p /build
|
||||||
|
venvpy -m bundle.pyinstaller --onedir --distpath=/build
|
||||||
|
pushd "/build/${EXE_NAME}"
|
||||||
|
chmod +x "${EXE_NAME}"
|
||||||
|
venvpy -m zipfile -c "/yt-dlp/dist/${EXE_NAME}.zip" ./
|
||||||
|
popd
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "${SKIP_ONEFILE_BUILD:-}" ]]; then
|
||||||
|
venvpy -m bundle.pyinstaller
|
||||||
|
chmod +x "./dist/${EXE_NAME}"
|
||||||
|
fi
|
||||||
51
bundle/docker/linux/verify.sh
Executable file
51
bundle/docker/linux/verify.sh
Executable file
@@ -0,0 +1,51 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
if [ -n "${SKIP_ONEFILE_BUILD:-}" ]; then
|
||||||
|
if [ -n "${SKIP_ONEDIR_BUILD:-}" ]; then
|
||||||
|
echo "All executable builds were skipped"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "Extracting zip to verify onedir build"
|
||||||
|
if command -v python3 >/dev/null 2>&1; then
|
||||||
|
python3 -m zipfile -e "/build/${EXE_NAME}.zip" ./
|
||||||
|
else
|
||||||
|
echo "Attempting to install unzip"
|
||||||
|
if command -v dnf >/dev/null 2>&1; then
|
||||||
|
dnf -y install --allowerasing unzip
|
||||||
|
elif command -v yum >/dev/null 2>&1; then
|
||||||
|
yum -y install unzip
|
||||||
|
elif command -v apt-get >/dev/null 2>&1; then
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get update -qq
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -qq -y --no-install-recommends unzip
|
||||||
|
elif command -v apk >/dev/null 2>&1; then
|
||||||
|
apk add --no-cache unzip
|
||||||
|
else
|
||||||
|
echo "Unsupported image"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
unzip "/build/${EXE_NAME}.zip" -d ./
|
||||||
|
fi
|
||||||
|
chmod +x "./${EXE_NAME}"
|
||||||
|
"./${EXE_NAME}" -v || true
|
||||||
|
"./${EXE_NAME}" --version
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Verifying onefile build"
|
||||||
|
cp "/build/${EXE_NAME}" ./
|
||||||
|
chmod +x "./${EXE_NAME}"
|
||||||
|
|
||||||
|
if [ -z "${UPDATE_TO:-}" ]; then
|
||||||
|
"./${EXE_NAME}" -v || true
|
||||||
|
"./${EXE_NAME}" --version
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
cp "./${EXE_NAME}" "./${EXE_NAME}_downgraded"
|
||||||
|
version="$("./${EXE_NAME}" --version)"
|
||||||
|
"./${EXE_NAME}_downgraded" -v --update-to "${UPDATE_TO}"
|
||||||
|
downgraded_version="$("./${EXE_NAME}_downgraded" --version)"
|
||||||
|
if [ "${version}" = "${downgraded_version}" ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
FROM alpine:3.19 as base
|
|
||||||
|
|
||||||
RUN apk --update add --no-cache \
|
|
||||||
build-base \
|
|
||||||
python3 \
|
|
||||||
pipx \
|
|
||||||
;
|
|
||||||
|
|
||||||
RUN pipx install pyinstaller
|
|
||||||
# Requires above step to prepare the shared venv
|
|
||||||
RUN ~/.local/share/pipx/shared/bin/python -m pip install -U wheel
|
|
||||||
RUN apk --update add --no-cache \
|
|
||||||
scons \
|
|
||||||
patchelf \
|
|
||||||
binutils \
|
|
||||||
;
|
|
||||||
RUN pipx install staticx
|
|
||||||
|
|
||||||
WORKDIR /yt-dlp
|
|
||||||
COPY entrypoint.sh /entrypoint.sh
|
|
||||||
ENTRYPOINT /entrypoint.sh
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
#!/bin/ash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
source ~/.local/share/pipx/venvs/pyinstaller/bin/activate
|
|
||||||
python -m devscripts.install_deps -o --include build
|
|
||||||
python -m devscripts.install_deps --include secretstorage --include curl-cffi
|
|
||||||
python -m devscripts.make_lazy_extractors
|
|
||||||
python devscripts/update-version.py -c "${channel}" -r "${origin}" "${version}"
|
|
||||||
python -m bundle.pyinstaller
|
|
||||||
deactivate
|
|
||||||
|
|
||||||
source ~/.local/share/pipx/venvs/staticx/bin/activate
|
|
||||||
staticx /yt-dlp/dist/yt-dlp_linux /build/yt-dlp_linux
|
|
||||||
deactivate
|
|
||||||
@@ -13,6 +13,8 @@ from PyInstaller.__main__ import run as run_pyinstaller
|
|||||||
from devscripts.utils import read_version
|
from devscripts.utils import read_version
|
||||||
|
|
||||||
OS_NAME, MACHINE, ARCH = sys.platform, platform.machine().lower(), platform.architecture()[0][:2]
|
OS_NAME, MACHINE, ARCH = sys.platform, platform.machine().lower(), platform.architecture()[0][:2]
|
||||||
|
if OS_NAME == 'linux' and platform.libc_ver()[0] != 'glibc':
|
||||||
|
OS_NAME = 'musllinux'
|
||||||
if MACHINE in ('x86', 'x86_64', 'amd64', 'i386', 'i686'):
|
if MACHINE in ('x86', 'x86_64', 'amd64', 'i386', 'i686'):
|
||||||
MACHINE = 'x86' if ARCH == '32' else ''
|
MACHINE = 'x86' if ARCH == '32' else ''
|
||||||
|
|
||||||
@@ -127,7 +129,6 @@ def windows_set_version(exe, version):
|
|||||||
StringStruct('FileDescription', 'yt-dlp%s' % (MACHINE and f' ({MACHINE})')),
|
StringStruct('FileDescription', 'yt-dlp%s' % (MACHINE and f' ({MACHINE})')),
|
||||||
StringStruct('FileVersion', version),
|
StringStruct('FileVersion', version),
|
||||||
StringStruct('InternalName', f'yt-dlp{suffix}'),
|
StringStruct('InternalName', f'yt-dlp{suffix}'),
|
||||||
StringStruct('LegalCopyright', 'pukkandan.ytdlp@gmail.com | UNLICENSE'),
|
|
||||||
StringStruct('OriginalFilename', f'yt-dlp{suffix}.exe'),
|
StringStruct('OriginalFilename', f'yt-dlp{suffix}.exe'),
|
||||||
StringStruct('ProductName', f'yt-dlp{suffix}'),
|
StringStruct('ProductName', f'yt-dlp{suffix}'),
|
||||||
StringStruct(
|
StringStruct(
|
||||||
|
|||||||
@@ -287,11 +287,16 @@
|
|||||||
{
|
{
|
||||||
"action": "add",
|
"action": "add",
|
||||||
"when": "cc5a5caac5fbc0d605b52bde0778d6fd5f97b5ab",
|
"when": "cc5a5caac5fbc0d605b52bde0778d6fd5f97b5ab",
|
||||||
"short": "[priority] **darwin_legacy_exe builds are being discontinued**\nThis release's `yt-dlp_macos_legacy` binary will likely be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13857)"
|
"short": "[priority] **darwin_legacy_exe builds are being discontinued**\nThis release's `yt-dlp_macos_legacy` binary will likely be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13856)"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"action": "add",
|
"action": "add",
|
||||||
"when": "c76ce28e06c816eb5b261dfb6aff6e69dd9b7382",
|
"when": "c76ce28e06c816eb5b261dfb6aff6e69dd9b7382",
|
||||||
"short": "[priority] **linux_armv7l_exe builds are being discontinued**\nThis release's `yt-dlp_linux_armv7l` binary could be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13976)"
|
"short": "[priority] **linux_armv7l_exe builds are being discontinued**\nThis release's `yt-dlp_linux_armv7l` binary could be the last one. [Read more](https://github.com/yt-dlp/yt-dlp/issues/13976)"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "add",
|
||||||
|
"when": "08d78996831bd8e1e3c2592d740c3def00bbf548",
|
||||||
|
"short": "[priority] **Several options have been deprecated**\nIn order to simplify the codebase and reduce maintenance burden, various options have been deprecated. Please remove them from your commands/configurations. [Read more](https://github.com/yt-dlp/yt-dlp/issues/14198)"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|||||||
316
devscripts/generate_third_party_licenses.py
Normal file
316
devscripts/generate_third_party_licenses.py
Normal file
@@ -0,0 +1,316 @@
|
|||||||
|
import requests
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
DEFAULT_OUTPUT = 'THIRD_PARTY_LICENSES.txt'
|
||||||
|
CACHE_LOCATION = '.license_cache'
|
||||||
|
HEADER = '''THIRD-PARTY LICENSES
|
||||||
|
|
||||||
|
This file aggregates license texts of third-party components included with the yt-dlp PyInstaller-bundled executables.
|
||||||
|
yt-dlp itself is licensed under the Unlicense (see LICENSE file).
|
||||||
|
Source code for bundled third-party components is available from the original projects.
|
||||||
|
If you cannot obtain it, the maintainers will provide it as per license obligation; maintainer emails are listed in pyproject.toml.'''
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
class Dependency:
|
||||||
|
name: str
|
||||||
|
license_url: str
|
||||||
|
project_url: str = ''
|
||||||
|
license: str = ''
|
||||||
|
comment: str = ''
|
||||||
|
|
||||||
|
|
||||||
|
DEPENDENCIES: list[Dependency] = [
|
||||||
|
# Core runtime environment components
|
||||||
|
Dependency(
|
||||||
|
name='Python',
|
||||||
|
license='PSF-2.0',
|
||||||
|
license_url='https://raw.githubusercontent.com/python/cpython/refs/heads/main/LICENSE',
|
||||||
|
project_url='https://www.python.org/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='Microsoft Distributable Code',
|
||||||
|
license_url='https://raw.githubusercontent.com/python/cpython/refs/heads/main/PC/crtlicense.txt',
|
||||||
|
comment='Only included in Windows builds',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='bzip2',
|
||||||
|
license='bzip2-1.0.6',
|
||||||
|
license_url='https://gitlab.com/federicomenaquintero/bzip2/-/raw/master/COPYING',
|
||||||
|
project_url='https://sourceware.org/bzip2/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libffi',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/libffi/libffi/refs/heads/master/LICENSE',
|
||||||
|
project_url='https://sourceware.org/libffi/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='OpenSSL 3.0+',
|
||||||
|
license='Apache-2.0',
|
||||||
|
license_url='https://raw.githubusercontent.com/openssl/openssl/refs/heads/master/LICENSE.txt',
|
||||||
|
project_url='https://www.openssl.org/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='SQLite',
|
||||||
|
license='Public Domain', # Technically does not need to be included
|
||||||
|
license_url='https://sqlite.org/src/raw/e108e1e69ae8e8a59e93c455654b8ac9356a11720d3345df2a4743e9590fb20d?at=LICENSE.md',
|
||||||
|
project_url='https://www.sqlite.org/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='liblzma',
|
||||||
|
license='0BSD', # Technically does not need to be included
|
||||||
|
license_url='https://raw.githubusercontent.com/tukaani-project/xz/refs/heads/master/COPYING',
|
||||||
|
project_url='https://tukaani.org/xz/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='mpdecimal',
|
||||||
|
license='BSD-2-Clause',
|
||||||
|
# No official repo URL
|
||||||
|
license_url='https://gist.githubusercontent.com/seproDev/9e5dbfc08af35c3f2463e64eb9b27161/raw/61f5a98bc1a4ad7d48b1c793fc3314d4d43c2ab1/mpdecimal_COPYRIGHT.txt',
|
||||||
|
project_url='https://www.bytereef.org/mpdecimal/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='zlib',
|
||||||
|
license='zlib',
|
||||||
|
license_url='https://raw.githubusercontent.com/madler/zlib/refs/heads/develop/LICENSE',
|
||||||
|
project_url='https://zlib.net/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='Expat',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/libexpat/libexpat/refs/heads/master/COPYING',
|
||||||
|
project_url='https://libexpat.github.io/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='ncurses',
|
||||||
|
license='X11-distribute-modifications-variant',
|
||||||
|
license_url='https://raw.githubusercontent.com/mirror/ncurses/refs/heads/master/COPYING',
|
||||||
|
comment='Only included in Linux/macOS builds',
|
||||||
|
project_url='https://invisible-island.net/ncurses/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='GNU Readline',
|
||||||
|
license='GPL-3.0-or-later',
|
||||||
|
license_url='https://tiswww.case.edu/php/chet/readline/COPYING',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://www.gnu.org/software/readline/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libstdc++',
|
||||||
|
license='GPL-3.0-with-GCC-exception',
|
||||||
|
license_url='https://raw.githubusercontent.com/gcc-mirror/gcc/refs/heads/master/COPYING.RUNTIME',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://gcc.gnu.org/onlinedocs/libstdc++/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libgcc',
|
||||||
|
license='GPL-3.0-with-GCC-exception',
|
||||||
|
license_url='https://raw.githubusercontent.com/gcc-mirror/gcc/refs/heads/master/COPYING.RUNTIME',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://gcc.gnu.org/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libuuid',
|
||||||
|
license='BSD-3-Clause',
|
||||||
|
license_url='https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/plain/lib/uuid/COPYING',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/uuid',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libintl',
|
||||||
|
license='LGPL-2.1-or-later',
|
||||||
|
license_url='https://raw.githubusercontent.com/autotools-mirror/gettext/refs/heads/master/gettext-runtime/intl/COPYING.LIB',
|
||||||
|
comment='Only included in macOS builds',
|
||||||
|
project_url='https://www.gnu.org/software/gettext/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libidn2',
|
||||||
|
license='LGPL-3.0-or-later',
|
||||||
|
license_url='https://gitlab.com/libidn/libidn2/-/raw/master/COPYING.LESSERv3',
|
||||||
|
comment='Only included in macOS builds',
|
||||||
|
project_url='https://www.gnu.org/software/libidn/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libidn2 (Unicode character data files)',
|
||||||
|
license='Unicode-TOU AND Unicode-DFS-2016',
|
||||||
|
license_url='https://gitlab.com/libidn/libidn2/-/raw/master/COPYING.unicode',
|
||||||
|
comment='Only included in macOS builds',
|
||||||
|
project_url='https://www.gnu.org/software/libidn/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='libunistring',
|
||||||
|
license='LGPL-3.0-or-later',
|
||||||
|
license_url='https://gitweb.git.savannah.gnu.org/gitweb/?p=libunistring.git;a=blob_plain;f=COPYING.LIB;hb=HEAD',
|
||||||
|
comment='Only included in macOS builds',
|
||||||
|
project_url='https://www.gnu.org/software/libunistring/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='librtmp',
|
||||||
|
license='LGPL-2.1-or-later',
|
||||||
|
# No official repo URL
|
||||||
|
license_url='https://gist.githubusercontent.com/seproDev/31d8c691ccddebe37b8b379307cb232d/raw/053408e98547ea8c7d9ba3a80c965f33e163b881/librtmp_COPYING.txt',
|
||||||
|
comment='Only included in macOS builds',
|
||||||
|
project_url='https://rtmpdump.mplayerhq.hu/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='zstd',
|
||||||
|
license='BSD-3-Clause',
|
||||||
|
license_url='https://raw.githubusercontent.com/facebook/zstd/refs/heads/dev/LICENSE',
|
||||||
|
comment='Only included in macOS builds',
|
||||||
|
project_url='https://facebook.github.io/zstd/',
|
||||||
|
),
|
||||||
|
|
||||||
|
# Python packages
|
||||||
|
Dependency(
|
||||||
|
name='brotli',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/google/brotli/refs/heads/master/LICENSE',
|
||||||
|
project_url='https://brotli.org/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='curl_cffi',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/lexiforest/curl_cffi/refs/heads/main/LICENSE',
|
||||||
|
comment='Not included in `yt-dlp_x86` and `yt-dlp_musllinux_aarch64` builds',
|
||||||
|
project_url='https://curl-cffi.readthedocs.io/',
|
||||||
|
),
|
||||||
|
# Dependency of curl_cffi
|
||||||
|
Dependency(
|
||||||
|
name='curl-impersonate',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/lexiforest/curl-impersonate/refs/heads/main/LICENSE',
|
||||||
|
comment='Not included in `yt-dlp_x86` and `yt-dlp_musllinux_aarch64` builds',
|
||||||
|
project_url='https://github.com/lexiforest/curl-impersonate',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='cffi',
|
||||||
|
license='MIT-0', # Technically does not need to be included
|
||||||
|
license_url='https://raw.githubusercontent.com/python-cffi/cffi/refs/heads/main/LICENSE',
|
||||||
|
project_url='https://cffi.readthedocs.io/',
|
||||||
|
),
|
||||||
|
# Dependecy of cffi
|
||||||
|
Dependency(
|
||||||
|
name='pycparser',
|
||||||
|
license='BSD-3-Clause',
|
||||||
|
license_url='https://raw.githubusercontent.com/eliben/pycparser/refs/heads/main/LICENSE',
|
||||||
|
project_url='https://github.com/eliben/pycparser',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='mutagen',
|
||||||
|
license='GPL-2.0-or-later',
|
||||||
|
license_url='https://raw.githubusercontent.com/quodlibet/mutagen/refs/heads/main/COPYING',
|
||||||
|
project_url='https://mutagen.readthedocs.io/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='PyCryptodome',
|
||||||
|
license='Public Domain and BSD-2-Clause',
|
||||||
|
license_url='https://raw.githubusercontent.com/Legrandin/pycryptodome/refs/heads/master/LICENSE.rst',
|
||||||
|
project_url='https://www.pycryptodome.org/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='certifi',
|
||||||
|
license='MPL-2.0',
|
||||||
|
license_url='https://raw.githubusercontent.com/certifi/python-certifi/refs/heads/master/LICENSE',
|
||||||
|
project_url='https://github.com/certifi/python-certifi',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='requests',
|
||||||
|
license='Apache-2.0',
|
||||||
|
license_url='https://raw.githubusercontent.com/psf/requests/refs/heads/main/LICENSE',
|
||||||
|
project_url='https://requests.readthedocs.io/',
|
||||||
|
),
|
||||||
|
# Dependency of requests
|
||||||
|
Dependency(
|
||||||
|
name='charset-normalizer',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/jawah/charset_normalizer/refs/heads/master/LICENSE',
|
||||||
|
project_url='https://charset-normalizer.readthedocs.io/',
|
||||||
|
),
|
||||||
|
# Dependency of requests
|
||||||
|
Dependency(
|
||||||
|
name='idna',
|
||||||
|
license='BSD-3-Clause',
|
||||||
|
license_url='https://raw.githubusercontent.com/kjd/idna/refs/heads/master/LICENSE.md',
|
||||||
|
project_url='https://github.com/kjd/idna',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='urllib3',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://raw.githubusercontent.com/urllib3/urllib3/refs/heads/main/LICENSE.txt',
|
||||||
|
project_url='https://urllib3.readthedocs.io/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='SecretStorage',
|
||||||
|
license='BSD-3-Clause',
|
||||||
|
license_url='https://raw.githubusercontent.com/mitya57/secretstorage/refs/heads/master/LICENSE',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://secretstorage.readthedocs.io/',
|
||||||
|
),
|
||||||
|
# Dependency of SecretStorage
|
||||||
|
Dependency(
|
||||||
|
name='cryptography',
|
||||||
|
license='Apache-2.0', # Also available as BSD-3-Clause
|
||||||
|
license_url='https://raw.githubusercontent.com/pyca/cryptography/refs/heads/main/LICENSE.APACHE',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://cryptography.io/',
|
||||||
|
),
|
||||||
|
# Dependency of SecretStorage
|
||||||
|
Dependency(
|
||||||
|
name='Jeepney',
|
||||||
|
license='MIT',
|
||||||
|
license_url='https://gitlab.com/takluyver/jeepney/-/raw/master/LICENSE',
|
||||||
|
comment='Only included in Linux builds',
|
||||||
|
project_url='https://jeepney.readthedocs.io/',
|
||||||
|
),
|
||||||
|
Dependency(
|
||||||
|
name='websockets',
|
||||||
|
license='BSD-3-Clause',
|
||||||
|
license_url='https://raw.githubusercontent.com/python-websockets/websockets/refs/heads/main/LICENSE',
|
||||||
|
project_url='https://websockets.readthedocs.io/',
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def fetch_text(dep: Dependency) -> str:
|
||||||
|
cache_dir = Path(CACHE_LOCATION)
|
||||||
|
cache_dir.mkdir(exist_ok=True)
|
||||||
|
url_hash = hashlib.sha256(dep.license_url.encode('utf-8')).hexdigest()
|
||||||
|
cache_file = cache_dir / f'{url_hash}.txt'
|
||||||
|
|
||||||
|
if cache_file.exists():
|
||||||
|
return cache_file.read_text()
|
||||||
|
|
||||||
|
# UA needed since some domains block requests default UA
|
||||||
|
req = requests.get(dep.license_url, headers={'User-Agent': 'yt-dlp license fetcher'})
|
||||||
|
req.raise_for_status()
|
||||||
|
text = req.text
|
||||||
|
cache_file.write_text(text)
|
||||||
|
return text
|
||||||
|
|
||||||
|
|
||||||
|
def build_output() -> str:
|
||||||
|
lines = [HEADER]
|
||||||
|
for d in DEPENDENCIES:
|
||||||
|
lines.append('\n')
|
||||||
|
lines.append('-' * 80)
|
||||||
|
header = f'{d.name}'
|
||||||
|
if d.license:
|
||||||
|
header += f' | {d.license}'
|
||||||
|
if d.comment:
|
||||||
|
header += f'\nNote: {d.comment}'
|
||||||
|
if d.project_url:
|
||||||
|
header += f'\nURL: {d.project_url}'
|
||||||
|
lines.append(header)
|
||||||
|
lines.append('-' * 80)
|
||||||
|
|
||||||
|
text = fetch_text(d)
|
||||||
|
lines.append(text.strip('\n') + '\n')
|
||||||
|
return '\n'.join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
content = build_output()
|
||||||
|
Path(DEFAULT_OUTPUT).write_text(content)
|
||||||
@@ -8,7 +8,7 @@ def main():
|
|||||||
return # This is unused in yt-dlp
|
return # This is unused in yt-dlp
|
||||||
|
|
||||||
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
||||||
options, args = parser.parse_args()
|
_, args = parser.parse_args()
|
||||||
if len(args) != 2:
|
if len(args) != 2:
|
||||||
parser.error('Expected an input and an output filename')
|
parser.error('Expected an input and an output filename')
|
||||||
|
|
||||||
|
|||||||
157
devscripts/setup_variables.py
Normal file
157
devscripts/setup_variables.py
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
import datetime as dt
|
||||||
|
import json
|
||||||
|
|
||||||
|
from devscripts.utils import calculate_version
|
||||||
|
|
||||||
|
|
||||||
|
STABLE_REPOSITORY = 'yt-dlp/yt-dlp'
|
||||||
|
|
||||||
|
|
||||||
|
def setup_variables(environment):
|
||||||
|
"""
|
||||||
|
`environment` must contain these keys:
|
||||||
|
REPOSITORY, INPUTS, PROCESSED,
|
||||||
|
PUSH_VERSION_COMMIT, PYPI_PROJECT,
|
||||||
|
SOURCE_PYPI_PROJECT, SOURCE_PYPI_SUFFIX,
|
||||||
|
TARGET_PYPI_PROJECT, TARGET_PYPI_SUFFIX,
|
||||||
|
SOURCE_ARCHIVE_REPO, TARGET_ARCHIVE_REPO,
|
||||||
|
HAS_SOURCE_ARCHIVE_REPO_TOKEN,
|
||||||
|
HAS_TARGET_ARCHIVE_REPO_TOKEN,
|
||||||
|
HAS_ARCHIVE_REPO_TOKEN
|
||||||
|
|
||||||
|
`INPUTS` must contain these keys:
|
||||||
|
prerelease
|
||||||
|
|
||||||
|
`PROCESSED` must contain these keys:
|
||||||
|
source_repo, source_tag,
|
||||||
|
target_repo, target_tag
|
||||||
|
"""
|
||||||
|
REPOSITORY = environment['REPOSITORY']
|
||||||
|
INPUTS = json.loads(environment['INPUTS'])
|
||||||
|
PROCESSED = json.loads(environment['PROCESSED'])
|
||||||
|
|
||||||
|
source_channel = None
|
||||||
|
does_not_have_needed_token = False
|
||||||
|
target_repo_token = None
|
||||||
|
pypi_project = None
|
||||||
|
pypi_suffix = None
|
||||||
|
|
||||||
|
source_repo = PROCESSED['source_repo']
|
||||||
|
source_tag = PROCESSED['source_tag']
|
||||||
|
if source_repo == 'stable':
|
||||||
|
source_repo = STABLE_REPOSITORY
|
||||||
|
if not source_repo:
|
||||||
|
source_repo = REPOSITORY
|
||||||
|
elif environment['SOURCE_ARCHIVE_REPO']:
|
||||||
|
source_channel = environment['SOURCE_ARCHIVE_REPO']
|
||||||
|
elif not source_tag and '/' not in source_repo:
|
||||||
|
source_tag = source_repo
|
||||||
|
source_repo = REPOSITORY
|
||||||
|
|
||||||
|
resolved_source = source_repo
|
||||||
|
if source_tag:
|
||||||
|
resolved_source = f'{resolved_source}@{source_tag}'
|
||||||
|
elif source_repo == STABLE_REPOSITORY:
|
||||||
|
resolved_source = 'stable'
|
||||||
|
|
||||||
|
revision = None
|
||||||
|
if INPUTS['prerelease'] or not environment['PUSH_VERSION_COMMIT']:
|
||||||
|
revision = dt.datetime.now(tz=dt.timezone.utc).strftime('%H%M%S')
|
||||||
|
|
||||||
|
version = calculate_version(INPUTS.get('version') or revision)
|
||||||
|
|
||||||
|
target_repo = PROCESSED['target_repo']
|
||||||
|
target_tag = PROCESSED['target_tag']
|
||||||
|
if target_repo:
|
||||||
|
if target_repo == 'stable':
|
||||||
|
target_repo = STABLE_REPOSITORY
|
||||||
|
if not target_tag:
|
||||||
|
if target_repo == STABLE_REPOSITORY:
|
||||||
|
target_tag = version
|
||||||
|
elif environment['TARGET_ARCHIVE_REPO']:
|
||||||
|
target_tag = source_tag or version
|
||||||
|
else:
|
||||||
|
target_tag = target_repo
|
||||||
|
target_repo = REPOSITORY
|
||||||
|
if target_repo != REPOSITORY:
|
||||||
|
target_repo = environment['TARGET_ARCHIVE_REPO']
|
||||||
|
target_repo_token = f'{PROCESSED["target_repo"].upper()}_ARCHIVE_REPO_TOKEN'
|
||||||
|
if not json.loads(environment['HAS_TARGET_ARCHIVE_REPO_TOKEN']):
|
||||||
|
does_not_have_needed_token = True
|
||||||
|
pypi_project = environment['TARGET_PYPI_PROJECT'] or None
|
||||||
|
pypi_suffix = environment['TARGET_PYPI_SUFFIX'] or None
|
||||||
|
else:
|
||||||
|
target_tag = source_tag or version
|
||||||
|
if source_channel:
|
||||||
|
target_repo = source_channel
|
||||||
|
target_repo_token = f'{PROCESSED["source_repo"].upper()}_ARCHIVE_REPO_TOKEN'
|
||||||
|
if not json.loads(environment['HAS_SOURCE_ARCHIVE_REPO_TOKEN']):
|
||||||
|
does_not_have_needed_token = True
|
||||||
|
pypi_project = environment['SOURCE_PYPI_PROJECT'] or None
|
||||||
|
pypi_suffix = environment['SOURCE_PYPI_SUFFIX'] or None
|
||||||
|
else:
|
||||||
|
target_repo = REPOSITORY
|
||||||
|
|
||||||
|
if does_not_have_needed_token:
|
||||||
|
if not json.loads(environment['HAS_ARCHIVE_REPO_TOKEN']):
|
||||||
|
print(f'::error::Repository access secret {target_repo_token} not found')
|
||||||
|
return None
|
||||||
|
target_repo_token = 'ARCHIVE_REPO_TOKEN'
|
||||||
|
|
||||||
|
if target_repo == REPOSITORY and not INPUTS['prerelease']:
|
||||||
|
pypi_project = environment['PYPI_PROJECT'] or None
|
||||||
|
|
||||||
|
return {
|
||||||
|
'channel': resolved_source,
|
||||||
|
'version': version,
|
||||||
|
'target_repo': target_repo,
|
||||||
|
'target_repo_token': target_repo_token,
|
||||||
|
'target_tag': target_tag,
|
||||||
|
'pypi_project': pypi_project,
|
||||||
|
'pypi_suffix': pypi_suffix,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def process_inputs(inputs):
|
||||||
|
outputs = {}
|
||||||
|
for key in ('source', 'target'):
|
||||||
|
repo, _, tag = inputs.get(key, '').partition('@')
|
||||||
|
outputs[f'{key}_repo'] = repo
|
||||||
|
outputs[f'{key}_tag'] = tag
|
||||||
|
return outputs
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
if not os.getenv('GITHUB_OUTPUT'):
|
||||||
|
print('This script is only intended for use with GitHub Actions', file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if 'process_inputs' in sys.argv:
|
||||||
|
inputs = json.loads(os.environ['INPUTS'])
|
||||||
|
print('::group::Inputs')
|
||||||
|
print(json.dumps(inputs, indent=2))
|
||||||
|
print('::endgroup::')
|
||||||
|
outputs = process_inputs(inputs)
|
||||||
|
print('::group::Processed')
|
||||||
|
print(json.dumps(outputs, indent=2))
|
||||||
|
print('::endgroup::')
|
||||||
|
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
|
||||||
|
f.write('\n'.join(f'{key}={value}' for key, value in outputs.items()))
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
outputs = setup_variables(dict(os.environ))
|
||||||
|
if not outputs:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
print('::group::Output variables')
|
||||||
|
print(json.dumps(outputs, indent=2))
|
||||||
|
print('::endgroup::')
|
||||||
|
|
||||||
|
with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
|
||||||
|
f.write('\n'.join(f'{key}={value or ""}' for key, value in outputs.items()))
|
||||||
324
devscripts/setup_variables_tests.py
Normal file
324
devscripts/setup_variables_tests.py
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
import datetime as dt
|
||||||
|
import json
|
||||||
|
|
||||||
|
from devscripts.setup_variables import STABLE_REPOSITORY, process_inputs, setup_variables
|
||||||
|
from devscripts.utils import calculate_version
|
||||||
|
|
||||||
|
|
||||||
|
def _test(github_repository, note, repo_vars, repo_secrets, inputs, expected=None, ignore_revision=False):
|
||||||
|
inp = inputs.copy()
|
||||||
|
inp.setdefault('linux_armv7l', True)
|
||||||
|
inp.setdefault('prerelease', False)
|
||||||
|
processed = process_inputs(inp)
|
||||||
|
source_repo = processed['source_repo'].upper()
|
||||||
|
target_repo = processed['target_repo'].upper()
|
||||||
|
variables = {k.upper(): v for k, v in repo_vars.items()}
|
||||||
|
secrets = {k.upper(): v for k, v in repo_secrets.items()}
|
||||||
|
|
||||||
|
env = {
|
||||||
|
# Keep this in sync with prepare.setup_variables in release.yml
|
||||||
|
'INPUTS': json.dumps(inp),
|
||||||
|
'PROCESSED': json.dumps(processed),
|
||||||
|
'REPOSITORY': github_repository,
|
||||||
|
'PUSH_VERSION_COMMIT': variables.get('PUSH_VERSION_COMMIT') or '',
|
||||||
|
'PYPI_PROJECT': variables.get('PYPI_PROJECT') or '',
|
||||||
|
'SOURCE_PYPI_PROJECT': variables.get(f'{source_repo}_PYPI_PROJECT') or '',
|
||||||
|
'SOURCE_PYPI_SUFFIX': variables.get(f'{source_repo}_PYPI_SUFFIX') or '',
|
||||||
|
'TARGET_PYPI_PROJECT': variables.get(f'{target_repo}_PYPI_PROJECT') or '',
|
||||||
|
'TARGET_PYPI_SUFFIX': variables.get(f'{target_repo}_PYPI_SUFFIX') or '',
|
||||||
|
'SOURCE_ARCHIVE_REPO': variables.get(f'{source_repo}_ARCHIVE_REPO') or '',
|
||||||
|
'TARGET_ARCHIVE_REPO': variables.get(f'{target_repo}_ARCHIVE_REPO') or '',
|
||||||
|
'HAS_SOURCE_ARCHIVE_REPO_TOKEN': json.dumps(bool(secrets.get(f'{source_repo}_ARCHIVE_REPO_TOKEN'))),
|
||||||
|
'HAS_TARGET_ARCHIVE_REPO_TOKEN': json.dumps(bool(secrets.get(f'{target_repo}_ARCHIVE_REPO_TOKEN'))),
|
||||||
|
'HAS_ARCHIVE_REPO_TOKEN': json.dumps(bool(secrets.get('ARCHIVE_REPO_TOKEN'))),
|
||||||
|
}
|
||||||
|
|
||||||
|
result = setup_variables(env)
|
||||||
|
if not expected:
|
||||||
|
print(' {\n' + '\n'.join(f' {k!r}: {v!r},' for k, v in result.items()) + '\n }')
|
||||||
|
return
|
||||||
|
|
||||||
|
exp = expected.copy()
|
||||||
|
if ignore_revision:
|
||||||
|
assert len(result['version']) == len(exp['version']), f'revision missing: {github_repository} {note}'
|
||||||
|
version_is_tag = result['version'] == result['target_tag']
|
||||||
|
for dct in (result, exp):
|
||||||
|
dct['version'] = '.'.join(dct['version'].split('.')[:3])
|
||||||
|
if version_is_tag:
|
||||||
|
dct['target_tag'] = dct['version']
|
||||||
|
assert result == exp, f'unexpected result: {github_repository} {note}'
|
||||||
|
|
||||||
|
|
||||||
|
def test_setup_variables():
|
||||||
|
DEFAULT_VERSION_WITH_REVISION = dt.datetime.now(tz=dt.timezone.utc).strftime('%Y.%m.%d.%H%M%S')
|
||||||
|
DEFAULT_VERSION = calculate_version()
|
||||||
|
BASE_REPO_VARS = {
|
||||||
|
'MASTER_ARCHIVE_REPO': 'yt-dlp/yt-dlp-master-builds',
|
||||||
|
'NIGHTLY_ARCHIVE_REPO': 'yt-dlp/yt-dlp-nightly-builds',
|
||||||
|
'NIGHTLY_PYPI_PROJECT': 'yt-dlp',
|
||||||
|
'NIGHTLY_PYPI_SUFFIX': 'dev',
|
||||||
|
'PUSH_VERSION_COMMIT': '1',
|
||||||
|
'PYPI_PROJECT': 'yt-dlp',
|
||||||
|
}
|
||||||
|
BASE_REPO_SECRETS = {
|
||||||
|
'ARCHIVE_REPO_TOKEN': '1',
|
||||||
|
}
|
||||||
|
FORK_REPOSITORY = 'fork/yt-dlp'
|
||||||
|
FORK_ORG = FORK_REPOSITORY.partition('/')[0]
|
||||||
|
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, stable',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {}, {
|
||||||
|
'channel': 'stable',
|
||||||
|
'version': DEFAULT_VERSION,
|
||||||
|
'target_repo': STABLE_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': DEFAULT_VERSION,
|
||||||
|
'pypi_project': 'yt-dlp',
|
||||||
|
'pypi_suffix': None,
|
||||||
|
})
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, nightly (w/o target)',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {
|
||||||
|
'source': 'nightly',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': 'nightly',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': 'yt-dlp/yt-dlp-nightly-builds',
|
||||||
|
'target_repo_token': 'ARCHIVE_REPO_TOKEN',
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': 'yt-dlp',
|
||||||
|
'pypi_suffix': 'dev',
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, nightly',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {
|
||||||
|
'source': 'nightly',
|
||||||
|
'target': 'nightly',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': 'nightly',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': 'yt-dlp/yt-dlp-nightly-builds',
|
||||||
|
'target_repo_token': 'ARCHIVE_REPO_TOKEN',
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': 'yt-dlp',
|
||||||
|
'pypi_suffix': 'dev',
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, master (w/o target)',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {
|
||||||
|
'source': 'master',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': 'master',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': 'yt-dlp/yt-dlp-master-builds',
|
||||||
|
'target_repo_token': 'ARCHIVE_REPO_TOKEN',
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, master',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {
|
||||||
|
'source': 'master',
|
||||||
|
'target': 'master',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': 'master',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': 'yt-dlp/yt-dlp-master-builds',
|
||||||
|
'target_repo_token': 'ARCHIVE_REPO_TOKEN',
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, special tag, updates to stable',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {
|
||||||
|
'target': f'{STABLE_REPOSITORY}@experimental',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': 'stable',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': STABLE_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': 'experimental',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
STABLE_REPOSITORY, 'official vars/secrets, special tag, "stable" as target repo',
|
||||||
|
BASE_REPO_VARS, BASE_REPO_SECRETS, {
|
||||||
|
'target': 'stable@experimental',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': 'stable',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': STABLE_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': 'experimental',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/o vars/secrets, stable',
|
||||||
|
{}, {}, {}, {
|
||||||
|
'channel': FORK_REPOSITORY,
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/o vars/secrets, prerelease',
|
||||||
|
{}, {}, {'prerelease': True}, {
|
||||||
|
'channel': FORK_REPOSITORY,
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/o vars/secrets, nightly',
|
||||||
|
{}, {}, {
|
||||||
|
'prerelease': True,
|
||||||
|
'source': 'nightly',
|
||||||
|
'target': 'nightly',
|
||||||
|
}, {
|
||||||
|
'channel': f'{FORK_REPOSITORY}@nightly',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': 'nightly',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/o vars/secrets, master',
|
||||||
|
{}, {}, {
|
||||||
|
'prerelease': True,
|
||||||
|
'source': 'master',
|
||||||
|
'target': 'master',
|
||||||
|
}, {
|
||||||
|
'channel': f'{FORK_REPOSITORY}@master',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': 'master',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/o vars/secrets, revision',
|
||||||
|
{}, {}, {'version': '123'}, {
|
||||||
|
'channel': FORK_REPOSITORY,
|
||||||
|
'version': f'{DEFAULT_VERSION[:10]}.123',
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': f'{DEFAULT_VERSION[:10]}.123',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
})
|
||||||
|
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/ PUSH_VERSION_COMMIT, stable',
|
||||||
|
{'PUSH_VERSION_COMMIT': '1'}, {}, {}, {
|
||||||
|
'channel': FORK_REPOSITORY,
|
||||||
|
'version': DEFAULT_VERSION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': DEFAULT_VERSION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
})
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/ PUSH_VERSION_COMMIT, prerelease',
|
||||||
|
{'PUSH_VERSION_COMMIT': '1'}, {}, {'prerelease': True}, {
|
||||||
|
'channel': FORK_REPOSITORY,
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/NIGHTLY_ARCHIVE_REPO_TOKEN, nightly', {
|
||||||
|
'NIGHTLY_ARCHIVE_REPO': f'{FORK_ORG}/yt-dlp-nightly-builds',
|
||||||
|
'PYPI_PROJECT': 'yt-dlp-test',
|
||||||
|
}, {
|
||||||
|
'NIGHTLY_ARCHIVE_REPO_TOKEN': '1',
|
||||||
|
}, {
|
||||||
|
'source': f'{FORK_ORG}/yt-dlp-nightly-builds',
|
||||||
|
'target': 'nightly',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': f'{FORK_ORG}/yt-dlp-nightly-builds',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': f'{FORK_ORG}/yt-dlp-nightly-builds',
|
||||||
|
'target_repo_token': 'NIGHTLY_ARCHIVE_REPO_TOKEN',
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork w/MASTER_ARCHIVE_REPO_TOKEN, master', {
|
||||||
|
'MASTER_ARCHIVE_REPO': f'{FORK_ORG}/yt-dlp-master-builds',
|
||||||
|
'MASTER_PYPI_PROJECT': 'yt-dlp-test',
|
||||||
|
'MASTER_PYPI_SUFFIX': 'dev',
|
||||||
|
}, {
|
||||||
|
'MASTER_ARCHIVE_REPO_TOKEN': '1',
|
||||||
|
}, {
|
||||||
|
'source': f'{FORK_ORG}/yt-dlp-master-builds',
|
||||||
|
'target': 'master',
|
||||||
|
'prerelease': True,
|
||||||
|
}, {
|
||||||
|
'channel': f'{FORK_ORG}/yt-dlp-master-builds',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': f'{FORK_ORG}/yt-dlp-master-builds',
|
||||||
|
'target_repo_token': 'MASTER_ARCHIVE_REPO_TOKEN',
|
||||||
|
'target_tag': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'pypi_project': 'yt-dlp-test',
|
||||||
|
'pypi_suffix': 'dev',
|
||||||
|
}, ignore_revision=True)
|
||||||
|
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork, non-numeric tag',
|
||||||
|
{}, {}, {'source': 'experimental'}, {
|
||||||
|
'channel': f'{FORK_REPOSITORY}@experimental',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': 'experimental',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
|
_test(
|
||||||
|
FORK_REPOSITORY, 'fork, non-numeric tag, updates to stable',
|
||||||
|
{}, {}, {
|
||||||
|
'prerelease': True,
|
||||||
|
'source': 'stable',
|
||||||
|
'target': 'experimental',
|
||||||
|
}, {
|
||||||
|
'channel': 'stable',
|
||||||
|
'version': DEFAULT_VERSION_WITH_REVISION,
|
||||||
|
'target_repo': FORK_REPOSITORY,
|
||||||
|
'target_repo_token': None,
|
||||||
|
'target_tag': 'experimental',
|
||||||
|
'pypi_project': None,
|
||||||
|
'pypi_suffix': None,
|
||||||
|
}, ignore_revision=True)
|
||||||
@@ -9,24 +9,9 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import contextlib
|
import contextlib
|
||||||
import datetime as dt
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from devscripts.utils import read_version, run_process, write_file
|
from devscripts.utils import calculate_version, run_process, write_file
|
||||||
|
|
||||||
|
|
||||||
def get_new_version(version, revision):
|
|
||||||
if not version:
|
|
||||||
version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')
|
|
||||||
|
|
||||||
if revision:
|
|
||||||
assert revision.isdecimal(), 'Revision must be a number'
|
|
||||||
else:
|
|
||||||
old_version = read_version().split('.')
|
|
||||||
if version.split('.') == old_version[:3]:
|
|
||||||
revision = str(int(([*old_version, 0])[3]) + 1)
|
|
||||||
|
|
||||||
return f'{version}.{revision}' if revision else version
|
|
||||||
|
|
||||||
|
|
||||||
def get_git_head():
|
def get_git_head():
|
||||||
@@ -72,9 +57,7 @@ if __name__ == '__main__':
|
|||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
git_head = get_git_head()
|
git_head = get_git_head()
|
||||||
version = (
|
version = calculate_version(args.version)
|
||||||
args.version if args.version and '.' in args.version
|
|
||||||
else get_new_version(None, args.version))
|
|
||||||
write_file(args.output, VERSION_TEMPLATE.format(
|
write_file(args.output, VERSION_TEMPLATE.format(
|
||||||
version=version, git_head=git_head, channel=args.channel, origin=args.origin,
|
version=version, git_head=git_head, channel=args.channel, origin=args.origin,
|
||||||
package_version=f'{version}{args.suffix}'))
|
package_version=f'{version}{args.suffix}'))
|
||||||
|
|||||||
@@ -20,7 +20,9 @@ if __name__ == '__main__':
|
|||||||
'--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
|
'--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
|
||||||
help='path to the Changelog file')
|
help='path to the Changelog file')
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
new_entry = create_changelog(args)
|
|
||||||
|
|
||||||
header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
|
header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
|
||||||
write_file(args.changelog_path, f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
|
current_version = read_version()
|
||||||
|
if current_version != changelog.splitlines()[0]:
|
||||||
|
new_entry = create_changelog(args)
|
||||||
|
write_file(args.changelog_path, f'{header}{sep}{current_version}\n{new_entry}\n{sep}{changelog}')
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
import argparse
|
import argparse
|
||||||
|
import datetime as dt
|
||||||
import functools
|
import functools
|
||||||
|
import re
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
@@ -20,6 +22,23 @@ def read_version(fname='yt_dlp/version.py', varname='__version__'):
|
|||||||
return items[varname]
|
return items[varname]
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_version(version=None, fname='yt_dlp/version.py'):
|
||||||
|
if version and '.' in version:
|
||||||
|
return version
|
||||||
|
|
||||||
|
revision = version
|
||||||
|
version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')
|
||||||
|
|
||||||
|
if revision:
|
||||||
|
assert re.fullmatch(r'[0-9]+', revision), 'Revision must be numeric'
|
||||||
|
else:
|
||||||
|
old_version = read_version(fname=fname).split('.')
|
||||||
|
if version.split('.') == old_version[:3]:
|
||||||
|
revision = str(int(([*old_version, 0])[3]) + 1)
|
||||||
|
|
||||||
|
return f'{version}.{revision}' if revision else version
|
||||||
|
|
||||||
|
|
||||||
def get_filename_args(has_infile=False, default_outfile=None):
|
def get_filename_args(has_infile=False, default_outfile=None):
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
if has_infile:
|
if has_infile:
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
[build-system]
|
[build-system]
|
||||||
requires = ["hatchling"]
|
requires = ["hatchling>=1.27.0"]
|
||||||
build-backend = "hatchling.build"
|
build-backend = "hatchling.build"
|
||||||
|
|
||||||
[project]
|
[project]
|
||||||
@@ -22,7 +22,8 @@ keywords = [
|
|||||||
"sponsorblock",
|
"sponsorblock",
|
||||||
"yt-dlp",
|
"yt-dlp",
|
||||||
]
|
]
|
||||||
license = {file = "LICENSE"}
|
license = "Unlicense"
|
||||||
|
license-files = ["LICENSE"]
|
||||||
classifiers = [
|
classifiers = [
|
||||||
"Topic :: Multimedia :: Video",
|
"Topic :: Multimedia :: Video",
|
||||||
"Development Status :: 5 - Production/Stable",
|
"Development Status :: 5 - Production/Stable",
|
||||||
@@ -34,10 +35,10 @@ classifiers = [
|
|||||||
"Programming Language :: Python :: 3.11",
|
"Programming Language :: Python :: 3.11",
|
||||||
"Programming Language :: Python :: 3.12",
|
"Programming Language :: Python :: 3.12",
|
||||||
"Programming Language :: Python :: 3.13",
|
"Programming Language :: Python :: 3.13",
|
||||||
|
"Programming Language :: Python :: 3.14",
|
||||||
"Programming Language :: Python :: Implementation",
|
"Programming Language :: Python :: Implementation",
|
||||||
"Programming Language :: Python :: Implementation :: CPython",
|
"Programming Language :: Python :: Implementation :: CPython",
|
||||||
"Programming Language :: Python :: Implementation :: PyPy",
|
"Programming Language :: Python :: Implementation :: PyPy",
|
||||||
"License :: OSI Approved :: The Unlicense (Unlicense)",
|
|
||||||
"Operating System :: OS Independent",
|
"Operating System :: OS Independent",
|
||||||
]
|
]
|
||||||
dynamic = ["version"]
|
dynamic = ["version"]
|
||||||
@@ -63,7 +64,7 @@ secretstorage = [
|
|||||||
]
|
]
|
||||||
build = [
|
build = [
|
||||||
"build",
|
"build",
|
||||||
"hatchling",
|
"hatchling>=1.27.0",
|
||||||
"pip",
|
"pip",
|
||||||
"setuptools>=71.0.2,<81", # See https://github.com/pyinstaller/pyinstaller/issues/9149
|
"setuptools>=71.0.2,<81", # See https://github.com/pyinstaller/pyinstaller/issues/9149
|
||||||
"wheel",
|
"wheel",
|
||||||
@@ -75,7 +76,7 @@ dev = [
|
|||||||
]
|
]
|
||||||
static-analysis = [
|
static-analysis = [
|
||||||
"autopep8~=2.0",
|
"autopep8~=2.0",
|
||||||
"ruff~=0.12.0",
|
"ruff~=0.13.0",
|
||||||
]
|
]
|
||||||
test = [
|
test = [
|
||||||
"pytest~=8.1",
|
"pytest~=8.1",
|
||||||
@@ -107,7 +108,6 @@ include = [
|
|||||||
"/LICENSE", # included as license
|
"/LICENSE", # included as license
|
||||||
"/pyproject.toml", # included by default
|
"/pyproject.toml", # included by default
|
||||||
"/README.md", # included as readme
|
"/README.md", # included as readme
|
||||||
"/setup.cfg",
|
|
||||||
"/supportedsites.md",
|
"/supportedsites.md",
|
||||||
]
|
]
|
||||||
artifacts = [
|
artifacts = [
|
||||||
@@ -173,7 +173,8 @@ python = [
|
|||||||
"3.11",
|
"3.11",
|
||||||
"3.12",
|
"3.12",
|
||||||
"3.13",
|
"3.13",
|
||||||
"pypy3.10",
|
"3.14",
|
||||||
|
"pypy3.11",
|
||||||
]
|
]
|
||||||
|
|
||||||
[tool.ruff]
|
[tool.ruff]
|
||||||
@@ -315,6 +316,7 @@ banned-from = [
|
|||||||
"yt_dlp.utils.error_to_compat_str".msg = "Use `str` instead."
|
"yt_dlp.utils.error_to_compat_str".msg = "Use `str` instead."
|
||||||
"yt_dlp.utils.bytes_to_intlist".msg = "Use `list` instead."
|
"yt_dlp.utils.bytes_to_intlist".msg = "Use `list` instead."
|
||||||
"yt_dlp.utils.intlist_to_bytes".msg = "Use `bytes` instead."
|
"yt_dlp.utils.intlist_to_bytes".msg = "Use `bytes` instead."
|
||||||
|
"yt_dlp.utils.jwt_encode_hs256".msg = "Use `yt_dlp.utils.jwt_encode` instead."
|
||||||
"yt_dlp.utils.decodeArgument".msg = "Do not use"
|
"yt_dlp.utils.decodeArgument".msg = "Do not use"
|
||||||
"yt_dlp.utils.decodeFilename".msg = "Do not use"
|
"yt_dlp.utils.decodeFilename".msg = "Do not use"
|
||||||
"yt_dlp.utils.encodeFilename".msg = "Do not use"
|
"yt_dlp.utils.encodeFilename".msg = "Do not use"
|
||||||
|
|||||||
39
setup.cfg
39
setup.cfg
@@ -1,39 +0,0 @@
|
|||||||
[flake8]
|
|
||||||
exclude = build,venv,.tox,.git,.pytest_cache
|
|
||||||
ignore = E402,E501,E731,E741,W503
|
|
||||||
max_line_length = 120
|
|
||||||
per_file_ignores =
|
|
||||||
devscripts/lazy_load_template.py: F401
|
|
||||||
|
|
||||||
|
|
||||||
[autoflake]
|
|
||||||
ignore-init-module-imports = true
|
|
||||||
ignore-pass-after-docstring = true
|
|
||||||
remove-all-unused-imports = true
|
|
||||||
remove-duplicate-keys = true
|
|
||||||
remove-unused-variables = true
|
|
||||||
|
|
||||||
|
|
||||||
[tox:tox]
|
|
||||||
skipsdist = true
|
|
||||||
envlist = py{39,310,311,312,313},pypy311
|
|
||||||
skip_missing_interpreters = true
|
|
||||||
|
|
||||||
[testenv] # tox
|
|
||||||
deps =
|
|
||||||
pytest
|
|
||||||
commands = pytest {posargs:"-m not download"}
|
|
||||||
passenv = HOME # For test_compat_expanduser
|
|
||||||
setenv =
|
|
||||||
# PYTHONWARNINGS = error # Catches PIP's warnings too
|
|
||||||
|
|
||||||
|
|
||||||
[isort]
|
|
||||||
py_version = 39
|
|
||||||
multi_line_output = VERTICAL_HANGING_INDENT
|
|
||||||
line_length = 80
|
|
||||||
reverse_relative = true
|
|
||||||
ensure_newline_before_comments = true
|
|
||||||
include_trailing_comma = true
|
|
||||||
known_first_party =
|
|
||||||
test
|
|
||||||
@@ -20,7 +20,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **3sat**
|
- **3sat**
|
||||||
- **4tube**
|
- **4tube**
|
||||||
- **56.com**
|
- **56.com**
|
||||||
- **6play**
|
|
||||||
- **7plus**
|
- **7plus**
|
||||||
- **8tracks**
|
- **8tracks**
|
||||||
- **9c9media**
|
- **9c9media**
|
||||||
@@ -44,11 +43,7 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **ADN**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
- **ADN**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
||||||
- **ADNSeason**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
- **ADNSeason**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
||||||
- **AdobeConnect**
|
- **AdobeConnect**
|
||||||
- **adobetv**: (**Currently broken**)
|
- **adobetv**
|
||||||
- **adobetv:channel**: (**Currently broken**)
|
|
||||||
- **adobetv:embed**: (**Currently broken**)
|
|
||||||
- **adobetv:show**: (**Currently broken**)
|
|
||||||
- **adobetv:video**
|
|
||||||
- **AdultSwim**
|
- **AdultSwim**
|
||||||
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault
|
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault
|
||||||
- **aenetworks:collection**
|
- **aenetworks:collection**
|
||||||
@@ -100,7 +95,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **ARD**
|
- **ARD**
|
||||||
- **ARDMediathek**
|
- **ARDMediathek**
|
||||||
- **ARDMediathekCollection**
|
- **ARDMediathekCollection**
|
||||||
- **Arkena**
|
|
||||||
- **Art19**
|
- **Art19**
|
||||||
- **Art19Show**
|
- **Art19Show**
|
||||||
- **arte.sky.it**
|
- **arte.sky.it**
|
||||||
@@ -155,9 +149,8 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **Beatport**
|
- **Beatport**
|
||||||
- **Beeg**
|
- **Beeg**
|
||||||
- **BehindKink**: (**Currently broken**)
|
- **BehindKink**: (**Currently broken**)
|
||||||
- **Bellator**
|
|
||||||
- **BerufeTV**
|
- **BerufeTV**
|
||||||
- **Bet**: (**Currently broken**)
|
- **Bet**
|
||||||
- **bfi:player**: (**Currently broken**)
|
- **bfi:player**: (**Currently broken**)
|
||||||
- **bfmtv**
|
- **bfmtv**
|
||||||
- **bfmtv:article**
|
- **bfmtv:article**
|
||||||
@@ -290,12 +283,10 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **CloudyCDN**
|
- **CloudyCDN**
|
||||||
- **Clubic**: (**Currently broken**)
|
- **Clubic**: (**Currently broken**)
|
||||||
- **Clyp**
|
- **Clyp**
|
||||||
- **cmt.com**: (**Currently broken**)
|
|
||||||
- **CNBCVideo**
|
- **CNBCVideo**
|
||||||
- **CNN**
|
- **CNN**
|
||||||
- **CNNIndonesia**
|
- **CNNIndonesia**
|
||||||
- **ComedyCentral**
|
- **ComedyCentral**
|
||||||
- **ComedyCentralTV**
|
|
||||||
- **ConanClassic**: (**Currently broken**)
|
- **ConanClassic**: (**Currently broken**)
|
||||||
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
|
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
|
||||||
- **CONtv**
|
- **CONtv**
|
||||||
@@ -307,7 +298,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **cpac**
|
- **cpac**
|
||||||
- **cpac:playlist**
|
- **cpac:playlist**
|
||||||
- **Cracked**
|
- **Cracked**
|
||||||
- **Crackle**
|
|
||||||
- **Craftsy**
|
- **Craftsy**
|
||||||
- **CrooksAndLiars**
|
- **CrooksAndLiars**
|
||||||
- **CrowdBunker**
|
- **CrowdBunker**
|
||||||
@@ -322,8 +312,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **curiositystream**: [*curiositystream*](## "netrc machine")
|
- **curiositystream**: [*curiositystream*](## "netrc machine")
|
||||||
- **curiositystream:collections**: [*curiositystream*](## "netrc machine")
|
- **curiositystream:collections**: [*curiositystream*](## "netrc machine")
|
||||||
- **curiositystream:series**: [*curiositystream*](## "netrc machine")
|
- **curiositystream:series**: [*curiositystream*](## "netrc machine")
|
||||||
- **cwtv**
|
|
||||||
- **cwtv:movie**
|
|
||||||
- **Cybrary**: [*cybrary*](## "netrc machine")
|
- **Cybrary**: [*cybrary*](## "netrc machine")
|
||||||
- **CybraryCourse**: [*cybrary*](## "netrc machine")
|
- **CybraryCourse**: [*cybrary*](## "netrc machine")
|
||||||
- **DacastPlaylist**
|
- **DacastPlaylist**
|
||||||
@@ -445,6 +433,7 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **fancode:live**: [*fancode*](## "netrc machine") (**Currently broken**)
|
- **fancode:live**: [*fancode*](## "netrc machine") (**Currently broken**)
|
||||||
- **fancode:vod**: [*fancode*](## "netrc machine") (**Currently broken**)
|
- **fancode:vod**: [*fancode*](## "netrc machine") (**Currently broken**)
|
||||||
- **Fathom**
|
- **Fathom**
|
||||||
|
- **Faulio**
|
||||||
- **FaulioLive**
|
- **FaulioLive**
|
||||||
- **faz.net**
|
- **faz.net**
|
||||||
- **fc2**: [*fc2*](## "netrc machine")
|
- **fc2**: [*fc2*](## "netrc machine")
|
||||||
@@ -457,7 +446,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **Filmweb**
|
- **Filmweb**
|
||||||
- **FiveThirtyEight**
|
- **FiveThirtyEight**
|
||||||
- **FiveTV**
|
- **FiveTV**
|
||||||
- **FlexTV**
|
|
||||||
- **Flickr**
|
- **Flickr**
|
||||||
- **Floatplane**
|
- **Floatplane**
|
||||||
- **FloatplaneChannel**
|
- **FloatplaneChannel**
|
||||||
@@ -700,8 +688,8 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **lbry:channel**: odysee.com channels
|
- **lbry:channel**: odysee.com channels
|
||||||
- **lbry:playlist**: odysee.com playlists
|
- **lbry:playlist**: odysee.com playlists
|
||||||
- **LCI**
|
- **LCI**
|
||||||
- **Lcp**
|
- **Lcp**: (**Currently broken**)
|
||||||
- **LcpPlay**
|
- **LcpPlay**: (**Currently broken**)
|
||||||
- **Le**: 乐视网
|
- **Le**: 乐视网
|
||||||
- **LearningOnScreen**
|
- **LearningOnScreen**
|
||||||
- **Lecture2Go**: (**Currently broken**)
|
- **Lecture2Go**: (**Currently broken**)
|
||||||
@@ -805,7 +793,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **mirrativ**
|
- **mirrativ**
|
||||||
- **mirrativ:user**
|
- **mirrativ:user**
|
||||||
- **MirrorCoUK**
|
- **MirrorCoUK**
|
||||||
- **MiTele**: mitele.es
|
|
||||||
- **mixch**
|
- **mixch**
|
||||||
- **mixch:archive**
|
- **mixch:archive**
|
||||||
- **mixch:movie**
|
- **mixch:movie**
|
||||||
@@ -840,12 +827,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **MSN**
|
- **MSN**
|
||||||
- **mtg**: MTG services
|
- **mtg**: MTG services
|
||||||
- **mtv**
|
- **mtv**
|
||||||
- **mtv.de**: (**Currently broken**)
|
|
||||||
- **mtv.it**
|
|
||||||
- **mtv.it:programma**
|
|
||||||
- **mtv:video**
|
|
||||||
- **mtvjapan**
|
|
||||||
- **mtvservices:embedded**
|
|
||||||
- **MTVUutisetArticle**: (**Currently broken**)
|
- **MTVUutisetArticle**: (**Currently broken**)
|
||||||
- **MuenchenTV**: münchen.tv (**Currently broken**)
|
- **MuenchenTV**: münchen.tv (**Currently broken**)
|
||||||
- **MujRozhlas**
|
- **MujRozhlas**
|
||||||
@@ -945,9 +926,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **NhkVodProgram**
|
- **NhkVodProgram**
|
||||||
- **nhl.com**
|
- **nhl.com**
|
||||||
- **nick.com**
|
- **nick.com**
|
||||||
- **nick.de**
|
|
||||||
- **nickelodeon:br**
|
|
||||||
- **nickelodeonru**
|
|
||||||
- **niconico**: [*niconico*](## "netrc machine") ニコニコ動画
|
- **niconico**: [*niconico*](## "netrc machine") ニコニコ動画
|
||||||
- **niconico:history**: NicoNico user history or likes. Requires cookies.
|
- **niconico:history**: NicoNico user history or likes. Requires cookies.
|
||||||
- **niconico:live**: [*niconico*](## "netrc machine") ニコニコ生放送
|
- **niconico:live**: [*niconico*](## "netrc machine") ニコニコ生放送
|
||||||
@@ -1025,6 +1003,7 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **onet.tv:channel**
|
- **onet.tv:channel**
|
||||||
- **OnetMVP**
|
- **OnetMVP**
|
||||||
- **OnionStudios**
|
- **OnionStudios**
|
||||||
|
- **onsen**: [*onsen*](## "netrc machine") インターネットラジオステーション<音泉>
|
||||||
- **Opencast**
|
- **Opencast**
|
||||||
- **OpencastPlaylist**
|
- **OpencastPlaylist**
|
||||||
- **openrec**
|
- **openrec**
|
||||||
@@ -1049,9 +1028,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **Panopto**
|
- **Panopto**
|
||||||
- **PanoptoList**
|
- **PanoptoList**
|
||||||
- **PanoptoPlaylist**
|
- **PanoptoPlaylist**
|
||||||
- **ParamountNetwork**
|
|
||||||
- **ParamountPlus**
|
|
||||||
- **ParamountPlusSeries**
|
|
||||||
- **ParamountPressExpress**
|
- **ParamountPressExpress**
|
||||||
- **Parler**: Posts on parler.com
|
- **Parler**: Posts on parler.com
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
@@ -1086,9 +1062,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **PinterestCollection**
|
- **PinterestCollection**
|
||||||
- **PiramideTV**
|
- **PiramideTV**
|
||||||
- **PiramideTVChannel**
|
- **PiramideTVChannel**
|
||||||
- **pixiv:sketch**
|
|
||||||
- **pixiv:sketch:user**
|
|
||||||
- **Pladform**
|
|
||||||
- **PlanetMarathi**
|
- **PlanetMarathi**
|
||||||
- **Platzi**: [*platzi*](## "netrc machine")
|
- **Platzi**: [*platzi*](## "netrc machine")
|
||||||
- **PlatziCourse**: [*platzi*](## "netrc machine")
|
- **PlatziCourse**: [*platzi*](## "netrc machine")
|
||||||
@@ -1275,7 +1248,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **rutube:person**: Rutube person videos
|
- **rutube:person**: Rutube person videos
|
||||||
- **rutube:playlist**: Rutube playlists
|
- **rutube:playlist**: Rutube playlists
|
||||||
- **rutube:tags**: Rutube tags
|
- **rutube:tags**: Rutube tags
|
||||||
- **RUTV**: RUTV.RU
|
|
||||||
- **Ruutu**: (**Currently broken**)
|
- **Ruutu**: (**Currently broken**)
|
||||||
- **Ruv**
|
- **Ruv**
|
||||||
- **ruv.is:spila**
|
- **ruv.is:spila**
|
||||||
@@ -1350,7 +1322,10 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
- **SlidesLive**
|
- **SlidesLive**
|
||||||
- **Slutload**
|
- **Slutload**
|
||||||
- **Smotrim**
|
- **smotrim**
|
||||||
|
- **smotrim:audio**
|
||||||
|
- **smotrim:live**
|
||||||
|
- **smotrim:playlist**
|
||||||
- **SnapchatSpotlight**
|
- **SnapchatSpotlight**
|
||||||
- **Snotr**
|
- **Snotr**
|
||||||
- **SoftWhiteUnderbelly**: [*softwhiteunderbelly*](## "netrc machine")
|
- **SoftWhiteUnderbelly**: [*softwhiteunderbelly*](## "netrc machine")
|
||||||
@@ -1377,8 +1352,9 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **southpark.cc.com:español**
|
- **southpark.cc.com:español**
|
||||||
- **southpark.de**
|
- **southpark.de**
|
||||||
- **southpark.lat**
|
- **southpark.lat**
|
||||||
- **southpark.nl**
|
- **southparkstudios.co.uk**
|
||||||
- **southparkstudios.dk**
|
- **southparkstudios.com.br**
|
||||||
|
- **southparkstudios.nu**
|
||||||
- **SovietsCloset**
|
- **SovietsCloset**
|
||||||
- **SovietsClosetPlaylist**
|
- **SovietsClosetPlaylist**
|
||||||
- **SpankBang**
|
- **SpankBang**
|
||||||
@@ -1387,8 +1363,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **Sport5**
|
- **Sport5**
|
||||||
- **SportBox**: (**Currently broken**)
|
- **SportBox**: (**Currently broken**)
|
||||||
- **SportDeutschland**
|
- **SportDeutschland**
|
||||||
- **spotify**: Spotify episodes (**Currently broken**)
|
|
||||||
- **spotify:show**: Spotify shows (**Currently broken**)
|
|
||||||
- **Spreaker**
|
- **Spreaker**
|
||||||
- **SpreakerShow**
|
- **SpreakerShow**
|
||||||
- **SpringboardPlatform**
|
- **SpringboardPlatform**
|
||||||
@@ -1403,6 +1377,7 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **startrek**: STAR TREK
|
- **startrek**: STAR TREK
|
||||||
- **startv**
|
- **startv**
|
||||||
- **Steam**
|
- **Steam**
|
||||||
|
- **SteamCommunity**
|
||||||
- **SteamCommunityBroadcast**
|
- **SteamCommunityBroadcast**
|
||||||
- **Stitcher**
|
- **Stitcher**
|
||||||
- **StitcherShow**
|
- **StitcherShow**
|
||||||
@@ -1526,15 +1501,17 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **TrueID**
|
- **TrueID**
|
||||||
- **TruNews**
|
- **TruNews**
|
||||||
- **Truth**
|
- **Truth**
|
||||||
|
- **ttinglive**: 띵라이브 (formerly FlexTV)
|
||||||
- **Tube8**: (**Currently broken**)
|
- **Tube8**: (**Currently broken**)
|
||||||
- **TubeTuGraz**: [*tubetugraz*](## "netrc machine") tube.tugraz.at
|
- **TubeTuGraz**: [*tubetugraz*](## "netrc machine") tube.tugraz.at
|
||||||
- **TubeTuGrazSeries**: [*tubetugraz*](## "netrc machine")
|
- **TubeTuGrazSeries**: [*tubetugraz*](## "netrc machine")
|
||||||
- **tubitv**: [*tubitv*](## "netrc machine")
|
- **tubitv**: [*tubitv*](## "netrc machine")
|
||||||
- **tubitv:series**
|
- **tubitv:series**
|
||||||
- **Tumblr**: [*tumblr*](## "netrc machine")
|
- **Tumblr**: [*tumblr*](## "netrc machine")
|
||||||
- **TuneInPodcast**
|
- **tunein:embed**
|
||||||
- **TuneInPodcastEpisode**
|
- **tunein:podcast**
|
||||||
- **TuneInStation**
|
- **tunein:podcast:program**
|
||||||
|
- **tunein:station**
|
||||||
- **tv.dfb.de**
|
- **tv.dfb.de**
|
||||||
- **TV2**
|
- **TV2**
|
||||||
- **TV2Article**
|
- **TV2Article**
|
||||||
@@ -1557,7 +1534,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **TVer**
|
- **TVer**
|
||||||
- **tvigle**: Интернет-телевидение Tvigle.ru
|
- **tvigle**: Интернет-телевидение Tvigle.ru
|
||||||
- **TVIPlayer**
|
- **TVIPlayer**
|
||||||
- **tvland.com**
|
|
||||||
- **TVN24**: (**Currently broken**)
|
- **TVN24**: (**Currently broken**)
|
||||||
- **TVNoe**: (**Currently broken**)
|
- **TVNoe**: (**Currently broken**)
|
||||||
- **tvopengr:embed**: tvopen.gr embedded videos
|
- **tvopengr:embed**: tvopen.gr embedded videos
|
||||||
@@ -1617,7 +1593,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **Varzesh3**: (**Currently broken**)
|
- **Varzesh3**: (**Currently broken**)
|
||||||
- **Vbox7**
|
- **Vbox7**
|
||||||
- **Veo**
|
- **Veo**
|
||||||
- **Vesti**: Вести.Ru (**Currently broken**)
|
|
||||||
- **Vevo**
|
- **Vevo**
|
||||||
- **VevoPlaylist**
|
- **VevoPlaylist**
|
||||||
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
||||||
@@ -1698,7 +1673,7 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **vrsquare:section**
|
- **vrsquare:section**
|
||||||
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
||||||
- **vrtmax**: [*vrtnu*](## "netrc machine") VRT MAX (formerly VRT NU)
|
- **vrtmax**: [*vrtnu*](## "netrc machine") VRT MAX (formerly VRT NU)
|
||||||
- **VTM**: (**Currently broken**)
|
- **VTM**
|
||||||
- **VTV**
|
- **VTV**
|
||||||
- **VTVGo**
|
- **VTVGo**
|
||||||
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
||||||
@@ -1765,7 +1740,6 @@ The only reliable way to check if a site is supported is to try it.
|
|||||||
- **wykop:dig:comment**
|
- **wykop:dig:comment**
|
||||||
- **wykop:post**
|
- **wykop:post**
|
||||||
- **wykop:post:comment**
|
- **wykop:post:comment**
|
||||||
- **Xanimu**
|
|
||||||
- **XboxClips**
|
- **XboxClips**
|
||||||
- **XHamster**
|
- **XHamster**
|
||||||
- **XHamsterEmbed**
|
- **XHamsterEmbed**
|
||||||
|
|||||||
@@ -36,7 +36,6 @@
|
|||||||
"verbose": true,
|
"verbose": true,
|
||||||
"writedescription": false,
|
"writedescription": false,
|
||||||
"writeinfojson": true,
|
"writeinfojson": true,
|
||||||
"writeannotations": false,
|
|
||||||
"writelink": false,
|
"writelink": false,
|
||||||
"writeurllink": false,
|
"writeurllink": false,
|
||||||
"writewebloclink": false,
|
"writewebloclink": false,
|
||||||
|
|||||||
@@ -1945,7 +1945,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
|||||||
server_thread.daemon = True
|
server_thread.daemon = True
|
||||||
server_thread.start()
|
server_thread.start()
|
||||||
|
|
||||||
(content, urlh) = self.ie._download_webpage_handle(
|
content, _ = self.ie._download_webpage_handle(
|
||||||
f'http://127.0.0.1:{port}/teapot', None,
|
f'http://127.0.0.1:{port}/teapot', None,
|
||||||
expected_status=TEAPOT_RESPONSE_STATUS)
|
expected_status=TEAPOT_RESPONSE_STATUS)
|
||||||
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
|
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
# Allow direct execution
|
# Allow direct execution
|
||||||
|
import datetime as dt
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import unittest
|
import unittest
|
||||||
@@ -12,7 +13,7 @@ import struct
|
|||||||
|
|
||||||
from yt_dlp import compat
|
from yt_dlp import compat
|
||||||
from yt_dlp.compat import urllib # isort: split
|
from yt_dlp.compat import urllib # isort: split
|
||||||
from yt_dlp.compat import compat_etree_fromstring, compat_expanduser
|
from yt_dlp.compat import compat_etree_fromstring, compat_expanduser, compat_datetime_from_timestamp
|
||||||
from yt_dlp.compat.urllib.request import getproxies
|
from yt_dlp.compat.urllib.request import getproxies
|
||||||
|
|
||||||
|
|
||||||
@@ -59,6 +60,45 @@ class TestCompat(unittest.TestCase):
|
|||||||
def test_struct_unpack(self):
|
def test_struct_unpack(self):
|
||||||
self.assertEqual(struct.unpack('!B', b'\x00'), (0,))
|
self.assertEqual(struct.unpack('!B', b'\x00'), (0,))
|
||||||
|
|
||||||
|
def test_compat_datetime_from_timestamp(self):
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(0),
|
||||||
|
dt.datetime(1970, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(1),
|
||||||
|
dt.datetime(1970, 1, 1, 0, 0, 1, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(3600),
|
||||||
|
dt.datetime(1970, 1, 1, 1, 0, 0, tzinfo=dt.timezone.utc))
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(-1),
|
||||||
|
dt.datetime(1969, 12, 31, 23, 59, 59, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(-86400),
|
||||||
|
dt.datetime(1969, 12, 31, 0, 0, 0, tzinfo=dt.timezone.utc))
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(0.5),
|
||||||
|
dt.datetime(1970, 1, 1, 0, 0, 0, 500000, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(1.000001),
|
||||||
|
dt.datetime(1970, 1, 1, 0, 0, 1, 1, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(-1.25),
|
||||||
|
dt.datetime(1969, 12, 31, 23, 59, 58, 750000, tzinfo=dt.timezone.utc))
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(-1577923200),
|
||||||
|
dt.datetime(1920, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(4102444800),
|
||||||
|
dt.datetime(2100, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc))
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
compat_datetime_from_timestamp(173568960000),
|
||||||
|
dt.datetime(7470, 3, 8, 0, 0, 0, tzinfo=dt.timezone.utc))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ class TestOverwrites(unittest.TestCase):
|
|||||||
'-o', 'test.webm',
|
'-o', 'test.webm',
|
||||||
'https://www.youtube.com/watch?v=jNQXAC9IVRw',
|
'https://www.youtube.com/watch?v=jNQXAC9IVRw',
|
||||||
], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
sout, serr = outp.communicate()
|
sout, _ = outp.communicate()
|
||||||
self.assertTrue(b'has already been downloaded' in sout)
|
self.assertTrue(b'has already been downloaded' in sout)
|
||||||
# if the file has no content, it has not been redownloaded
|
# if the file has no content, it has not been redownloaded
|
||||||
self.assertTrue(os.path.getsize(download_file) < 1)
|
self.assertTrue(os.path.getsize(download_file) < 1)
|
||||||
@@ -41,7 +41,7 @@ class TestOverwrites(unittest.TestCase):
|
|||||||
'-o', 'test.webm',
|
'-o', 'test.webm',
|
||||||
'https://www.youtube.com/watch?v=jNQXAC9IVRw',
|
'https://www.youtube.com/watch?v=jNQXAC9IVRw',
|
||||||
], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
sout, serr = outp.communicate()
|
sout, _ = outp.communicate()
|
||||||
self.assertTrue(b'has already been downloaded' not in sout)
|
self.assertTrue(b'has already been downloaded' not in sout)
|
||||||
# if the file has no content, it has not been redownloaded
|
# if the file has no content, it has not been redownloaded
|
||||||
self.assertTrue(os.path.getsize(download_file) > 1)
|
self.assertTrue(os.path.getsize(download_file) > 1)
|
||||||
|
|||||||
@@ -153,7 +153,7 @@ class TestPoTokenProvider:
|
|||||||
|
|
||||||
with pytest.raises(
|
with pytest.raises(
|
||||||
PoTokenProviderRejectedRequest,
|
PoTokenProviderRejectedRequest,
|
||||||
match='External requests by "example" provider do not support proxy scheme "socks4". Supported proxy '
|
match=r'External requests by "example" provider do not support proxy scheme "socks4"\. Supported proxy '
|
||||||
'schemes: http, socks5h',
|
'schemes: http, socks5h',
|
||||||
):
|
):
|
||||||
provider.request_pot(pot_request)
|
provider.request_pot(pot_request)
|
||||||
|
|||||||
@@ -14,7 +14,6 @@ from yt_dlp.extractor import (
|
|||||||
NRKTVIE,
|
NRKTVIE,
|
||||||
PBSIE,
|
PBSIE,
|
||||||
CeskaTelevizeIE,
|
CeskaTelevizeIE,
|
||||||
ComedyCentralIE,
|
|
||||||
DailymotionIE,
|
DailymotionIE,
|
||||||
DemocracynowIE,
|
DemocracynowIE,
|
||||||
LyndaIE,
|
LyndaIE,
|
||||||
@@ -279,23 +278,6 @@ class TestNPOSubtitles(BaseTestSubtitles):
|
|||||||
self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
|
self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
|
||||||
|
|
||||||
|
|
||||||
@is_download_test
|
|
||||||
@unittest.skip('IE broken')
|
|
||||||
class TestMTVSubtitles(BaseTestSubtitles):
|
|
||||||
url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans'
|
|
||||||
IE = ComedyCentralIE
|
|
||||||
|
|
||||||
def getInfoDict(self):
|
|
||||||
return super().getInfoDict()['entries'][0]
|
|
||||||
|
|
||||||
def test_allsubtitles(self):
|
|
||||||
self.DL.params['writesubtitles'] = True
|
|
||||||
self.DL.params['allsubtitles'] = True
|
|
||||||
subtitles = self.getSubtitles()
|
|
||||||
self.assertEqual(set(subtitles.keys()), {'en'})
|
|
||||||
self.assertEqual(md5(subtitles['en']), '78206b8d8a0cfa9da64dc026eea48961')
|
|
||||||
|
|
||||||
|
|
||||||
@is_download_test
|
@is_download_test
|
||||||
class TestNRKSubtitles(BaseTestSubtitles):
|
class TestNRKSubtitles(BaseTestSubtitles):
|
||||||
url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
|
url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|||||||
|
|
||||||
|
|
||||||
from test.helper import FakeYDL, report_warning
|
from test.helper import FakeYDL, report_warning
|
||||||
from yt_dlp.update import UpdateInfo, Updater
|
from yt_dlp.update import UpdateInfo, Updater, UPDATE_SOURCES, _make_label
|
||||||
|
|
||||||
|
|
||||||
# XXX: Keep in sync with yt_dlp.update.UPDATE_SOURCES
|
# XXX: Keep in sync with yt_dlp.update.UPDATE_SOURCES
|
||||||
@@ -84,8 +84,9 @@ lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
|||||||
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lock 2024.10.22 py2exe .+
|
lock 2024.10.22 py2exe .+
|
||||||
lock 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lock 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
||||||
lock 2024.10.22 (?!\w+_exe).+ Python 3\.8
|
lock 2024.10.22 zip Python 3\.8
|
||||||
lock 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lock 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lock 2025.08.11 darwin_legacy_exe .+
|
||||||
'''
|
'''
|
||||||
|
|
||||||
TEST_LOCKFILE_V2_TMPL = r'''%s
|
TEST_LOCKFILE_V2_TMPL = r'''%s
|
||||||
@@ -94,20 +95,23 @@ lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
|
|||||||
lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 py2exe .+
|
lockV2 yt-dlp/yt-dlp 2024.10.22 py2exe .+
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lockV2 yt-dlp/yt-dlp 2024.10.22 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 (?!\w+_exe).+ Python 3\.8
|
lockV2 yt-dlp/yt-dlp 2024.10.22 zip Python 3\.8
|
||||||
lockV2 yt-dlp/yt-dlp 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lockV2 yt-dlp/yt-dlp 2024.10.22 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lockV2 yt-dlp/yt-dlp 2025.08.11 darwin_legacy_exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 py2exe .+
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 py2exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 (?!\w+_exe).+ Python 3\.8
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 zip Python 3\.8
|
||||||
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2024.10.22.051025 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lockV2 yt-dlp/yt-dlp-nightly-builds 2025.08.12.233030 darwin_legacy_exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
|
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.045052 py2exe .+
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.045052 py2exe .+
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 linux_(?:armv7l|aarch64)_exe .+-glibc2\.(?:[12]?\d|30)\b
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 (?!\w+_exe).+ Python 3\.8
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 zip Python 3\.8
|
||||||
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
lockV2 yt-dlp/yt-dlp-master-builds 2024.10.22.060347 win(?:_x86)?_exe Python 3\.[78].+ Windows-(?:7-|2008ServerR2)
|
||||||
|
lockV2 yt-dlp/yt-dlp-master-builds 2025.08.12.232447 darwin_legacy_exe .+
|
||||||
'''
|
'''
|
||||||
|
|
||||||
TEST_LOCKFILE_V2 = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_COMMENT
|
TEST_LOCKFILE_V2 = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_COMMENT
|
||||||
@@ -217,6 +221,10 @@ class TestUpdate(unittest.TestCase):
|
|||||||
test( # linux_aarch64_exe w/glibc2.3 should only update to glibc<2.31 lock
|
test( # linux_aarch64_exe w/glibc2.3 should only update to glibc<2.31 lock
|
||||||
lockfile, 'linux_aarch64_exe Python 3.8.0 (CPython aarch64 64bit) - Linux-6.5.0-1025-azure-aarch64-with-glibc2.3 (OpenSSL',
|
lockfile, 'linux_aarch64_exe Python 3.8.0 (CPython aarch64 64bit) - Linux-6.5.0-1025-azure-aarch64-with-glibc2.3 (OpenSSL',
|
||||||
'2025.01.01', '2024.10.22')
|
'2025.01.01', '2024.10.22')
|
||||||
|
test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.11', '2025.08.11')
|
||||||
|
test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.11', '2025.08.11', exact=True)
|
||||||
|
test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.12', '2025.08.11')
|
||||||
|
test(lockfile, 'darwin_legacy_exe Python 3.10.5', '2025.08.12', None, exact=True)
|
||||||
|
|
||||||
# Forks can block updates to non-numeric tags rather than lock
|
# Forks can block updates to non-numeric tags rather than lock
|
||||||
test(TEST_LOCKFILE_FORK, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp')
|
test(TEST_LOCKFILE_FORK, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp')
|
||||||
@@ -272,6 +280,26 @@ class TestUpdate(unittest.TestCase):
|
|||||||
test('testing', None, current_commit='9' * 40)
|
test('testing', None, current_commit='9' * 40)
|
||||||
test('testing', UpdateInfo('testing', commit='9' * 40))
|
test('testing', UpdateInfo('testing', commit='9' * 40))
|
||||||
|
|
||||||
|
def test_make_label(self):
|
||||||
|
STABLE_REPO = UPDATE_SOURCES['stable']
|
||||||
|
NIGHTLY_REPO = UPDATE_SOURCES['nightly']
|
||||||
|
MASTER_REPO = UPDATE_SOURCES['master']
|
||||||
|
|
||||||
|
for inputs, expected in [
|
||||||
|
([STABLE_REPO, '2025.09.02', '2025.09.02'], f'stable@2025.09.02 from {STABLE_REPO}'),
|
||||||
|
([NIGHTLY_REPO, '2025.09.02.123456', '2025.09.02.123456'], f'nightly@2025.09.02.123456 from {NIGHTLY_REPO}'),
|
||||||
|
([MASTER_REPO, '2025.09.02.987654', '2025.09.02.987654'], f'master@2025.09.02.987654 from {MASTER_REPO}'),
|
||||||
|
(['fork/yt-dlp', 'experimental', '2025.12.31.000000'], 'fork/yt-dlp@experimental build 2025.12.31.000000'),
|
||||||
|
(['fork/yt-dlp', '2025.09.02', '2025.09.02'], 'fork/yt-dlp@2025.09.02'),
|
||||||
|
([STABLE_REPO, 'experimental', '2025.12.31.000000'], f'{STABLE_REPO}@experimental build 2025.12.31.000000'),
|
||||||
|
([STABLE_REPO, 'experimental'], f'{STABLE_REPO}@experimental'),
|
||||||
|
(['fork/yt-dlp', 'experimental'], 'fork/yt-dlp@experimental'),
|
||||||
|
]:
|
||||||
|
result = _make_label(*inputs)
|
||||||
|
self.assertEqual(
|
||||||
|
result, expected,
|
||||||
|
f'{inputs!r} returned {result!r} instead of {expected!r}')
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import datetime as dt
|
|||||||
import io
|
import io
|
||||||
import itertools
|
import itertools
|
||||||
import json
|
import json
|
||||||
|
import ntpath
|
||||||
import pickle
|
import pickle
|
||||||
import subprocess
|
import subprocess
|
||||||
import unittest
|
import unittest
|
||||||
@@ -71,6 +72,8 @@ from yt_dlp.utils import (
|
|||||||
iri_to_uri,
|
iri_to_uri,
|
||||||
is_html,
|
is_html,
|
||||||
js_to_json,
|
js_to_json,
|
||||||
|
jwt_decode_hs256,
|
||||||
|
jwt_encode,
|
||||||
limit_length,
|
limit_length,
|
||||||
locked_file,
|
locked_file,
|
||||||
lowercase_escape,
|
lowercase_escape,
|
||||||
@@ -99,11 +102,13 @@ from yt_dlp.utils import (
|
|||||||
remove_start,
|
remove_start,
|
||||||
render_table,
|
render_table,
|
||||||
replace_extension,
|
replace_extension,
|
||||||
|
datetime_round,
|
||||||
rot47,
|
rot47,
|
||||||
sanitize_filename,
|
sanitize_filename,
|
||||||
sanitize_path,
|
sanitize_path,
|
||||||
sanitize_url,
|
sanitize_url,
|
||||||
shell_quote,
|
shell_quote,
|
||||||
|
strftime_or_none,
|
||||||
smuggle_url,
|
smuggle_url,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
strip_jsonp,
|
strip_jsonp,
|
||||||
@@ -249,12 +254,6 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
|
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
|
||||||
self.assertEqual(sanitize_path('C:\\abc:%(title)s.%(ext)s'), 'C:\\abc#%(title)s.%(ext)s')
|
self.assertEqual(sanitize_path('C:\\abc:%(title)s.%(ext)s'), 'C:\\abc#%(title)s.%(ext)s')
|
||||||
|
|
||||||
# Check with nt._path_normpath if available
|
|
||||||
try:
|
|
||||||
from nt import _path_normpath as nt_path_normpath
|
|
||||||
except ImportError:
|
|
||||||
nt_path_normpath = None
|
|
||||||
|
|
||||||
for test, expected in [
|
for test, expected in [
|
||||||
('C:\\', 'C:\\'),
|
('C:\\', 'C:\\'),
|
||||||
('../abc', '..\\abc'),
|
('../abc', '..\\abc'),
|
||||||
@@ -272,8 +271,7 @@ class TestUtil(unittest.TestCase):
|
|||||||
result = sanitize_path(test)
|
result = sanitize_path(test)
|
||||||
assert result == expected, f'{test} was incorrectly resolved'
|
assert result == expected, f'{test} was incorrectly resolved'
|
||||||
assert result == sanitize_path(result), f'{test} changed after sanitizing again'
|
assert result == sanitize_path(result), f'{test} changed after sanitizing again'
|
||||||
if nt_path_normpath:
|
assert result == ntpath.normpath(test), f'{test} does not match ntpath.normpath'
|
||||||
assert result == nt_path_normpath(test), f'{test} does not match nt._path_normpath'
|
|
||||||
|
|
||||||
def test_sanitize_url(self):
|
def test_sanitize_url(self):
|
||||||
self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
|
self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
|
||||||
@@ -407,6 +405,25 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
|
self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
|
||||||
self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))
|
self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))
|
||||||
|
|
||||||
|
def test_datetime_round(self):
|
||||||
|
self.assertEqual(datetime_round(dt.datetime.strptime('1820-05-12T01:23:45Z', '%Y-%m-%dT%H:%M:%SZ')),
|
||||||
|
dt.datetime(1820, 5, 12, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(datetime_round(dt.datetime.strptime('1969-12-31T23:34:45Z', '%Y-%m-%dT%H:%M:%SZ'), 'hour'),
|
||||||
|
dt.datetime(1970, 1, 1, 0, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(datetime_round(dt.datetime.strptime('2024-12-25T01:23:45Z', '%Y-%m-%dT%H:%M:%SZ'), 'minute'),
|
||||||
|
dt.datetime(2024, 12, 25, 1, 24, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(datetime_round(dt.datetime.strptime('2024-12-25T01:23:45.123Z', '%Y-%m-%dT%H:%M:%S.%fZ'), 'second'),
|
||||||
|
dt.datetime(2024, 12, 25, 1, 23, 45, tzinfo=dt.timezone.utc))
|
||||||
|
self.assertEqual(datetime_round(dt.datetime.strptime('2024-12-25T01:23:45.678Z', '%Y-%m-%dT%H:%M:%S.%fZ'), 'second'),
|
||||||
|
dt.datetime(2024, 12, 25, 1, 23, 46, tzinfo=dt.timezone.utc))
|
||||||
|
|
||||||
|
def test_strftime_or_none(self):
|
||||||
|
self.assertEqual(strftime_or_none(-4722192000), '18200512')
|
||||||
|
self.assertEqual(strftime_or_none(0), '19700101')
|
||||||
|
self.assertEqual(strftime_or_none(1735084800), '20241225')
|
||||||
|
# Throws OverflowError
|
||||||
|
self.assertEqual(strftime_or_none(1735084800000), None)
|
||||||
|
|
||||||
def test_daterange(self):
|
def test_daterange(self):
|
||||||
_20century = DateRange('19000101', '20000101')
|
_20century = DateRange('19000101', '20000101')
|
||||||
self.assertFalse('17890714' in _20century)
|
self.assertFalse('17890714' in _20century)
|
||||||
@@ -2180,6 +2197,41 @@ Line 1
|
|||||||
assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
|
assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
|
||||||
assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'
|
assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'
|
||||||
|
|
||||||
|
_JWT_KEY = '12345678'
|
||||||
|
_JWT_HEADERS_1 = {'a': 'b'}
|
||||||
|
_JWT_HEADERS_2 = {'typ': 'JWT', 'alg': 'HS256'}
|
||||||
|
_JWT_HEADERS_3 = {'typ': 'JWT', 'alg': 'RS256'}
|
||||||
|
_JWT_HEADERS_4 = {'c': 'd', 'alg': 'ES256'}
|
||||||
|
_JWT_DECODED = {
|
||||||
|
'foo': 'bar',
|
||||||
|
'qux': 'baz',
|
||||||
|
}
|
||||||
|
_JWT_SIMPLE = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJxdXgiOiJiYXoifQ.fKojvTWqnjNTbsdoDTmYNc4tgYAG3h_SWRzM77iLH0U'
|
||||||
|
_JWT_WITH_EXTRA_HEADERS = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImEiOiJiIn0.eyJmb28iOiJiYXIiLCJxdXgiOiJiYXoifQ.Ia91-B77yasfYM7jsB6iVKLew-3rO6ITjNmjWUVXCvQ'
|
||||||
|
_JWT_WITH_REORDERED_HEADERS = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJmb28iOiJiYXIiLCJxdXgiOiJiYXoifQ.slg-7COta5VOfB36p3tqV4MGPV6TTA_ouGnD48UEVq4'
|
||||||
|
_JWT_WITH_REORDERED_HEADERS_AND_RS256_ALG = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIiLCJxdXgiOiJiYXoifQ.XWp496oVgQnoits0OOocutdjxoaQwn4GUWWxUsKENPM'
|
||||||
|
_JWT_WITH_EXTRA_HEADERS_AND_ES256_ALG = 'eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCIsImMiOiJkIn0.eyJmb28iOiJiYXIiLCJxdXgiOiJiYXoifQ.oM_tc7IkfrwkoRh43rFFE1wOi3J3mQGwx7_lMyKQqDg'
|
||||||
|
|
||||||
|
def test_jwt_encode(self):
|
||||||
|
def test(expected, headers={}):
|
||||||
|
self.assertEqual(jwt_encode(self._JWT_DECODED, self._JWT_KEY, headers=headers), expected)
|
||||||
|
|
||||||
|
test(self._JWT_SIMPLE)
|
||||||
|
test(self._JWT_WITH_EXTRA_HEADERS, headers=self._JWT_HEADERS_1)
|
||||||
|
test(self._JWT_WITH_REORDERED_HEADERS, headers=self._JWT_HEADERS_2)
|
||||||
|
test(self._JWT_WITH_REORDERED_HEADERS_AND_RS256_ALG, headers=self._JWT_HEADERS_3)
|
||||||
|
test(self._JWT_WITH_EXTRA_HEADERS_AND_ES256_ALG, headers=self._JWT_HEADERS_4)
|
||||||
|
|
||||||
|
def test_jwt_decode_hs256(self):
|
||||||
|
def test(inp):
|
||||||
|
self.assertEqual(jwt_decode_hs256(inp), self._JWT_DECODED)
|
||||||
|
|
||||||
|
test(self._JWT_SIMPLE)
|
||||||
|
test(self._JWT_WITH_EXTRA_HEADERS)
|
||||||
|
test(self._JWT_WITH_REORDERED_HEADERS)
|
||||||
|
test(self._JWT_WITH_REORDERED_HEADERS_AND_RS256_ALG)
|
||||||
|
test(self._JWT_WITH_EXTRA_HEADERS_AND_ES256_ALG)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ class TestVerboseOutput(unittest.TestCase):
|
|||||||
'--username', 'johnsmith@gmail.com',
|
'--username', 'johnsmith@gmail.com',
|
||||||
'--password', 'my_secret_password',
|
'--password', 'my_secret_password',
|
||||||
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
sout, serr = outp.communicate()
|
_, serr = outp.communicate()
|
||||||
self.assertTrue(b'--username' in serr)
|
self.assertTrue(b'--username' in serr)
|
||||||
self.assertTrue(b'johnsmith' not in serr)
|
self.assertTrue(b'johnsmith' not in serr)
|
||||||
self.assertTrue(b'--password' in serr)
|
self.assertTrue(b'--password' in serr)
|
||||||
@@ -36,7 +36,7 @@ class TestVerboseOutput(unittest.TestCase):
|
|||||||
'-u', 'johnsmith@gmail.com',
|
'-u', 'johnsmith@gmail.com',
|
||||||
'-p', 'my_secret_password',
|
'-p', 'my_secret_password',
|
||||||
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
sout, serr = outp.communicate()
|
_, serr = outp.communicate()
|
||||||
self.assertTrue(b'-u' in serr)
|
self.assertTrue(b'-u' in serr)
|
||||||
self.assertTrue(b'johnsmith' not in serr)
|
self.assertTrue(b'johnsmith' not in serr)
|
||||||
self.assertTrue(b'-p' in serr)
|
self.assertTrue(b'-p' in serr)
|
||||||
@@ -50,7 +50,7 @@ class TestVerboseOutput(unittest.TestCase):
|
|||||||
'--username=johnsmith@gmail.com',
|
'--username=johnsmith@gmail.com',
|
||||||
'--password=my_secret_password',
|
'--password=my_secret_password',
|
||||||
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
sout, serr = outp.communicate()
|
_, serr = outp.communicate()
|
||||||
self.assertTrue(b'--username' in serr)
|
self.assertTrue(b'--username' in serr)
|
||||||
self.assertTrue(b'johnsmith' not in serr)
|
self.assertTrue(b'johnsmith' not in serr)
|
||||||
self.assertTrue(b'--password' in serr)
|
self.assertTrue(b'--password' in serr)
|
||||||
@@ -64,7 +64,7 @@ class TestVerboseOutput(unittest.TestCase):
|
|||||||
'-u=johnsmith@gmail.com',
|
'-u=johnsmith@gmail.com',
|
||||||
'-p=my_secret_password',
|
'-p=my_secret_password',
|
||||||
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
sout, serr = outp.communicate()
|
_, serr = outp.communicate()
|
||||||
self.assertTrue(b'-u' in serr)
|
self.assertTrue(b'-u' in serr)
|
||||||
self.assertTrue(b'johnsmith' not in serr)
|
self.assertTrue(b'johnsmith' not in serr)
|
||||||
self.assertTrue(b'-p' in serr)
|
self.assertTrue(b'-p' in serr)
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ import random
|
|||||||
import ssl
|
import ssl
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
from yt_dlp import socks, traverse_obj
|
from yt_dlp import socks
|
||||||
from yt_dlp.cookies import YoutubeDLCookieJar
|
from yt_dlp.cookies import YoutubeDLCookieJar
|
||||||
from yt_dlp.dependencies import websockets
|
from yt_dlp.dependencies import websockets
|
||||||
from yt_dlp.networking import Request
|
from yt_dlp.networking import Request
|
||||||
@@ -32,6 +32,7 @@ from yt_dlp.networking.exceptions import (
|
|||||||
SSLError,
|
SSLError,
|
||||||
TransportError,
|
TransportError,
|
||||||
)
|
)
|
||||||
|
from yt_dlp.utils.traversal import traverse_obj
|
||||||
from yt_dlp.utils.networking import HTTPHeaderDict
|
from yt_dlp.utils.networking import HTTPHeaderDict
|
||||||
|
|
||||||
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
|
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
|||||||
@@ -1,77 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
# Allow direct execution
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import unittest
|
|
||||||
|
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
||||||
|
|
||||||
|
|
||||||
import xml.etree.ElementTree
|
|
||||||
|
|
||||||
import yt_dlp.extractor
|
|
||||||
import yt_dlp.YoutubeDL
|
|
||||||
from test.helper import get_params, is_download_test, try_rm
|
|
||||||
|
|
||||||
|
|
||||||
class YoutubeDL(yt_dlp.YoutubeDL):
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super().__init__(*args, **kwargs)
|
|
||||||
self.to_stderr = self.to_screen
|
|
||||||
|
|
||||||
|
|
||||||
params = get_params({
|
|
||||||
'writeannotations': True,
|
|
||||||
'skip_download': True,
|
|
||||||
'writeinfojson': False,
|
|
||||||
'format': 'flv',
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
TEST_ID = 'gr51aVj-mLg'
|
|
||||||
ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
|
|
||||||
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
|
|
||||||
|
|
||||||
|
|
||||||
@is_download_test
|
|
||||||
class TestAnnotations(unittest.TestCase):
|
|
||||||
def setUp(self):
|
|
||||||
# Clear old files
|
|
||||||
self.tearDown()
|
|
||||||
|
|
||||||
def test_info_json(self):
|
|
||||||
expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
|
|
||||||
ie = yt_dlp.extractor.YoutubeIE()
|
|
||||||
ydl = YoutubeDL(params)
|
|
||||||
ydl.add_info_extractor(ie)
|
|
||||||
ydl.download([TEST_ID])
|
|
||||||
self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
|
|
||||||
annoxml = None
|
|
||||||
with open(ANNOTATIONS_FILE, encoding='utf-8') as annof:
|
|
||||||
annoxml = xml.etree.ElementTree.parse(annof)
|
|
||||||
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
|
|
||||||
root = annoxml.getroot()
|
|
||||||
self.assertEqual(root.tag, 'document')
|
|
||||||
annotationsTag = root.find('annotations')
|
|
||||||
self.assertEqual(annotationsTag.tag, 'annotations')
|
|
||||||
annotations = annotationsTag.findall('annotation')
|
|
||||||
|
|
||||||
# Not all the annotations have TEXT children and the annotations are returned unsorted.
|
|
||||||
for a in annotations:
|
|
||||||
self.assertEqual(a.tag, 'annotation')
|
|
||||||
if a.get('type') == 'text':
|
|
||||||
textTag = a.find('TEXT')
|
|
||||||
text = textTag.text
|
|
||||||
self.assertTrue(text in expected) # assertIn only added in python 2.7
|
|
||||||
# remove the first occurrence, there could be more than one annotation with the same text
|
|
||||||
expected.remove(text)
|
|
||||||
# We should have seen (and removed) all the expected annotation texts.
|
|
||||||
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
|
|
||||||
|
|
||||||
def tearDown(self):
|
|
||||||
try_rm(ANNOTATIONS_FILE)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
@@ -138,6 +138,21 @@ _SIG_TESTS = [
|
|||||||
'gN7a-hudCuAuPH6fByOk1_GNXN0yNMHShjZXS2VOgsEItAJz0tipeavEOmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt',
|
'gN7a-hudCuAuPH6fByOk1_GNXN0yNMHShjZXS2VOgsEItAJz0tipeavEOmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt',
|
||||||
'JC2JfQdSswRAIgGBCxZyAfKyi0cjXCb3DqEctUw-NYdNmOEvaepit0zJAtIEsgOV2SXZjhSHMNy0NXNG_1kOyBf6HPuAuCduh-a',
|
'JC2JfQdSswRAIgGBCxZyAfKyi0cjXCb3DqEctUw-NYdNmOEvaepit0zJAtIEsgOV2SXZjhSHMNy0NXNG_1kOyBf6HPuAuCduh-a',
|
||||||
),
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/010fbc8d/player_es5.vflset/en_US/base.js',
|
||||||
|
'gN7a-hudCuAuPH6fByOk1_GNXN0yNMHShjZXS2VOgsEItAJz0tipeavEOmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt',
|
||||||
|
'ttJC2JfQdSswRAIgGBCxZyAfKyi0cjXCb3DqEctUw-NYdNmOEvaepit2zJAsIEggOVaSXZjhSHMNy0NXNG_1kOyBf6HPuAuCduh-',
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/010fbc8d/player_es6.vflset/en_US/base.js',
|
||||||
|
'gN7a-hudCuAuPH6fByOk1_GNXN0yNMHShjZXS2VOgsEItAJz0tipeavEOmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt',
|
||||||
|
'ttJC2JfQdSswRAIgGBCxZyAfKyi0cjXCb3DqEctUw-NYdNmOEvaepit2zJAsIEggOVaSXZjhSHMNy0NXNG_1kOyBf6HPuAuCduh-',
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/5ec65609/player_ias_tcc.vflset/en_US/base.js',
|
||||||
|
'AAJAJfQdSswRAIgNSN0GDUcHnCIXkKcF61yLBgDHiX1sUhOJdY4_GxunRYCIDeYNYP_16mQTPm5f1OVq3oV1ijUNYPjP4iUSMAjO9bZ',
|
||||||
|
'AJfQdSswRAIgNSN0GDUcHnCIXkKcF61ZLBgDHiX1sUhOJdY4_GxunRYCIDyYNYP_16mQTPm5f1OVq3oV1ijUNYPjP4iUSMAjO9be',
|
||||||
|
),
|
||||||
]
|
]
|
||||||
|
|
||||||
_NSIG_TESTS = [
|
_NSIG_TESTS = [
|
||||||
@@ -377,6 +392,18 @@ _NSIG_TESTS = [
|
|||||||
'https://www.youtube.com/s/player/ef259203/player_ias_tce.vflset/en_US/base.js',
|
'https://www.youtube.com/s/player/ef259203/player_ias_tce.vflset/en_US/base.js',
|
||||||
'rPqBC01nJpqhhi2iA2U', 'hY7dbiKFT51UIA',
|
'rPqBC01nJpqhhi2iA2U', 'hY7dbiKFT51UIA',
|
||||||
),
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/010fbc8d/player_es5.vflset/en_US/base.js',
|
||||||
|
'0hlOAlqjFszVvF4Z', 'R-H23bZGAsRFTg',
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/010fbc8d/player_es6.vflset/en_US/base.js',
|
||||||
|
'0hlOAlqjFszVvF4Z', 'R-H23bZGAsRFTg',
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/5ec65609/player_ias_tcc.vflset/en_US/base.js',
|
||||||
|
'6l5CTNx4AzIqH4MXM', 'NupToduxHBew1g',
|
||||||
|
),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -304,7 +304,6 @@ class YoutubeDL:
|
|||||||
clean_infojson: Remove internal metadata from the infojson
|
clean_infojson: Remove internal metadata from the infojson
|
||||||
getcomments: Extract video comments. This will not be written to disk
|
getcomments: Extract video comments. This will not be written to disk
|
||||||
unless writeinfojson is also given
|
unless writeinfojson is also given
|
||||||
writeannotations: Write the video annotations to a .annotations.xml file
|
|
||||||
writethumbnail: Write the thumbnail image to a file
|
writethumbnail: Write the thumbnail image to a file
|
||||||
allow_playlist_files: Whether to write playlists' description, infojson etc
|
allow_playlist_files: Whether to write playlists' description, infojson etc
|
||||||
also to disk when using the 'write*' options
|
also to disk when using the 'write*' options
|
||||||
@@ -511,11 +510,11 @@ class YoutubeDL:
|
|||||||
the downloader (see yt_dlp/downloader/common.py):
|
the downloader (see yt_dlp/downloader/common.py):
|
||||||
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
|
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
|
||||||
max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
|
max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
|
||||||
continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
|
continuedl, hls_use_mpegts, http_chunk_size, external_downloader_args,
|
||||||
external_downloader_args, concurrent_fragment_downloads, progress_delta.
|
concurrent_fragment_downloads, progress_delta.
|
||||||
|
|
||||||
The following options are used by the post processors:
|
The following options are used by the post processors:
|
||||||
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
|
ffmpeg_location: Location of the ffmpeg binary; either the path
|
||||||
to the binary or its containing directory.
|
to the binary or its containing directory.
|
||||||
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
|
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
|
||||||
and a list of additional command-line arguments for the
|
and a list of additional command-line arguments for the
|
||||||
@@ -566,32 +565,14 @@ class YoutubeDL:
|
|||||||
allsubtitles: - Use subtitleslangs = ['all']
|
allsubtitles: - Use subtitleslangs = ['all']
|
||||||
Downloads all the subtitles of the video
|
Downloads all the subtitles of the video
|
||||||
(requires writesubtitles or writeautomaticsub)
|
(requires writesubtitles or writeautomaticsub)
|
||||||
include_ads: - Doesn't work
|
|
||||||
Download ads as well
|
|
||||||
call_home: - Not implemented
|
|
||||||
Boolean, true if we are allowed to contact the
|
|
||||||
yt-dlp servers for debugging.
|
|
||||||
post_hooks: - Register a custom postprocessor
|
post_hooks: - Register a custom postprocessor
|
||||||
A list of functions that get called as the final step
|
A list of functions that get called as the final step
|
||||||
for each video file, after all postprocessors have been
|
for each video file, after all postprocessors have been
|
||||||
called. The filename will be passed as the only argument.
|
called. The filename will be passed as the only argument.
|
||||||
hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
|
hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
|
||||||
Use the native HLS downloader instead of ffmpeg/avconv
|
Use the native HLS downloader instead of ffmpeg
|
||||||
if True, otherwise use ffmpeg/avconv if False, otherwise
|
if True, otherwise use ffmpeg if False, otherwise
|
||||||
use downloader suggested by extractor if None.
|
use downloader suggested by extractor if None.
|
||||||
prefer_ffmpeg: - avconv support is deprecated
|
|
||||||
If False, use avconv instead of ffmpeg if both are available,
|
|
||||||
otherwise prefer ffmpeg.
|
|
||||||
youtube_include_dash_manifest: - Use extractor_args
|
|
||||||
If True (default), DASH manifests and related
|
|
||||||
data will be downloaded and processed by extractor.
|
|
||||||
You can reduce network I/O by disabling it if you don't
|
|
||||||
care about DASH. (only for youtube)
|
|
||||||
youtube_include_hls_manifest: - Use extractor_args
|
|
||||||
If True (default), HLS manifests and related
|
|
||||||
data will be downloaded and processed by extractor.
|
|
||||||
You can reduce network I/O by disabling it if you don't
|
|
||||||
care about HLS. (only for youtube)
|
|
||||||
no_color: Same as `color='no_color'`
|
no_color: Same as `color='no_color'`
|
||||||
no_overwrites: Same as `overwrites=False`
|
no_overwrites: Same as `overwrites=False`
|
||||||
"""
|
"""
|
||||||
@@ -599,7 +580,7 @@ class YoutubeDL:
|
|||||||
_NUMERIC_FIELDS = {
|
_NUMERIC_FIELDS = {
|
||||||
'width', 'height', 'asr', 'audio_channels', 'fps',
|
'width', 'height', 'asr', 'audio_channels', 'fps',
|
||||||
'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
|
'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
|
||||||
'timestamp', 'release_timestamp',
|
'timestamp', 'release_timestamp', 'available_at',
|
||||||
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
|
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
|
||||||
'average_rating', 'comment_count', 'age_limit',
|
'average_rating', 'comment_count', 'age_limit',
|
||||||
'start_time', 'end_time',
|
'start_time', 'end_time',
|
||||||
@@ -609,7 +590,7 @@ class YoutubeDL:
|
|||||||
|
|
||||||
_format_fields = {
|
_format_fields = {
|
||||||
# NB: Keep in sync with the docstring of extractor/common.py
|
# NB: Keep in sync with the docstring of extractor/common.py
|
||||||
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
|
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note', 'available_at',
|
||||||
'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
|
'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
|
||||||
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns', 'hls_media_playlist_data',
|
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns', 'hls_media_playlist_data',
|
||||||
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
|
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
|
||||||
@@ -750,10 +731,6 @@ class YoutubeDL:
|
|||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
|
|
||||||
if self.params.get('geo_verification_proxy') is None:
|
|
||||||
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
|
|
||||||
|
|
||||||
check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
|
check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
|
||||||
|
|
||||||
for msg in self.params.get('_warnings', []):
|
for msg in self.params.get('_warnings', []):
|
||||||
@@ -2717,11 +2694,7 @@ class YoutubeDL:
|
|||||||
('modified_timestamp', 'modified_date'),
|
('modified_timestamp', 'modified_date'),
|
||||||
):
|
):
|
||||||
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
|
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
|
||||||
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
|
info_dict[date_key] = strftime_or_none(info_dict[ts_key])
|
||||||
# see http://bugs.python.org/issue1646728)
|
|
||||||
with contextlib.suppress(ValueError, OverflowError, OSError):
|
|
||||||
upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
|
|
||||||
info_dict[date_key] = upload_date.strftime('%Y%m%d')
|
|
||||||
|
|
||||||
if not info_dict.get('release_year'):
|
if not info_dict.get('release_year'):
|
||||||
info_dict['release_year'] = traverse_obj(info_dict, ('release_date', {lambda x: int(x[:4])}))
|
info_dict['release_year'] = traverse_obj(info_dict, ('release_date', {lambda x: int(x[:4])}))
|
||||||
@@ -3339,28 +3312,6 @@ class YoutubeDL:
|
|||||||
elif _infojson_written is None:
|
elif _infojson_written is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Note: Annotations are deprecated
|
|
||||||
annofn = None
|
|
||||||
if self.params.get('writeannotations', False):
|
|
||||||
annofn = self.prepare_filename(info_dict, 'annotation')
|
|
||||||
if annofn:
|
|
||||||
if not self._ensure_dir_exists(annofn):
|
|
||||||
return
|
|
||||||
if not self.params.get('overwrites', True) and os.path.exists(annofn):
|
|
||||||
self.to_screen('[info] Video annotations are already present')
|
|
||||||
elif not info_dict.get('annotations'):
|
|
||||||
self.report_warning('There are no annotations to write.')
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
self.to_screen('[info] Writing video annotations to: ' + annofn)
|
|
||||||
with open(annofn, 'w', encoding='utf-8') as annofile:
|
|
||||||
annofile.write(info_dict['annotations'])
|
|
||||||
except (KeyError, TypeError):
|
|
||||||
self.report_warning('There are no annotations to write.')
|
|
||||||
except OSError:
|
|
||||||
self.report_error('Cannot write annotations file: ' + annofn)
|
|
||||||
return
|
|
||||||
|
|
||||||
# Write internet shortcut files
|
# Write internet shortcut files
|
||||||
def _write_link_file(link_type):
|
def _write_link_file(link_type):
|
||||||
url = try_get(info_dict['webpage_url'], iri_to_uri)
|
url = try_get(info_dict['webpage_url'], iri_to_uri)
|
||||||
|
|||||||
@@ -59,11 +59,9 @@ from .utils import (
|
|||||||
render_table,
|
render_table,
|
||||||
setproctitle,
|
setproctitle,
|
||||||
shell_quote,
|
shell_quote,
|
||||||
traverse_obj,
|
|
||||||
variadic,
|
variadic,
|
||||||
write_string,
|
write_string,
|
||||||
)
|
)
|
||||||
from .utils.networking import std_headers
|
|
||||||
from .utils._utils import _UnsafeExtensionError
|
from .utils._utils import _UnsafeExtensionError
|
||||||
from .YoutubeDL import YoutubeDL
|
from .YoutubeDL import YoutubeDL
|
||||||
|
|
||||||
@@ -500,6 +498,14 @@ def validate_options(opts):
|
|||||||
'To let yt-dlp download and merge the best available formats, simply do not pass any format selection',
|
'To let yt-dlp download and merge the best available formats, simply do not pass any format selection',
|
||||||
'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning')))
|
'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning')))
|
||||||
|
|
||||||
|
# Common mistake: -f mp4
|
||||||
|
if opts.format == 'mp4':
|
||||||
|
warnings.append('.\n '.join((
|
||||||
|
'"-f mp4" selects the best pre-merged mp4 format which is often not what\'s intended',
|
||||||
|
'Pre-merged mp4 formats are not available from all sites, or may only be available in lower quality',
|
||||||
|
'To prioritize the best h264 video and aac audio in an mp4 container, use "-t mp4" instead',
|
||||||
|
'If you know what you are doing and want a pre-merged mp4 format, use "-f b[ext=mp4]" instead to suppress this warning')))
|
||||||
|
|
||||||
# --(postprocessor/downloader)-args without name
|
# --(postprocessor/downloader)-args without name
|
||||||
def report_args_compat(name, value, key1, key2=None, where=None):
|
def report_args_compat(name, value, key1, key2=None, where=None):
|
||||||
if key1 in value and key2 not in value:
|
if key1 in value and key2 not in value:
|
||||||
@@ -515,7 +521,6 @@ def validate_options(opts):
|
|||||||
|
|
||||||
if report_args_compat('post-processor', opts.postprocessor_args, 'default-compat', 'default'):
|
if report_args_compat('post-processor', opts.postprocessor_args, 'default-compat', 'default'):
|
||||||
opts.postprocessor_args['default'] = opts.postprocessor_args.pop('default-compat')
|
opts.postprocessor_args['default'] = opts.postprocessor_args.pop('default-compat')
|
||||||
opts.postprocessor_args.setdefault('sponskrub', [])
|
|
||||||
|
|
||||||
def report_conflict(arg1, opt1, arg2='--allow-unplayable-formats', opt2='allow_unplayable_formats',
|
def report_conflict(arg1, opt1, arg2='--allow-unplayable-formats', opt2='allow_unplayable_formats',
|
||||||
val1=NO_DEFAULT, val2=NO_DEFAULT, default=False):
|
val1=NO_DEFAULT, val2=NO_DEFAULT, default=False):
|
||||||
@@ -540,11 +545,6 @@ def validate_options(opts):
|
|||||||
'"--exec before_dl:"', 'exec_cmd', val2=opts.exec_cmd.get('before_dl'))
|
'"--exec before_dl:"', 'exec_cmd', val2=opts.exec_cmd.get('before_dl'))
|
||||||
report_conflict('--id', 'useid', '--output', 'outtmpl', val2=opts.outtmpl.get('default'))
|
report_conflict('--id', 'useid', '--output', 'outtmpl', val2=opts.outtmpl.get('default'))
|
||||||
report_conflict('--remux-video', 'remuxvideo', '--recode-video', 'recodevideo')
|
report_conflict('--remux-video', 'remuxvideo', '--recode-video', 'recodevideo')
|
||||||
report_conflict('--sponskrub', 'sponskrub', '--remove-chapters', 'remove_chapters')
|
|
||||||
report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-mark', 'sponsorblock_mark')
|
|
||||||
report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-remove', 'sponsorblock_remove')
|
|
||||||
report_conflict('--sponskrub-cut', 'sponskrub_cut', '--split-chapter', 'split_chapters',
|
|
||||||
val1=opts.sponskrub and opts.sponskrub_cut)
|
|
||||||
|
|
||||||
# Conflicts with --allow-unplayable-formats
|
# Conflicts with --allow-unplayable-formats
|
||||||
report_conflict('--embed-metadata', 'addmetadata')
|
report_conflict('--embed-metadata', 'addmetadata')
|
||||||
@@ -557,23 +557,15 @@ def validate_options(opts):
|
|||||||
report_conflict('--recode-video', 'recodevideo')
|
report_conflict('--recode-video', 'recodevideo')
|
||||||
report_conflict('--remove-chapters', 'remove_chapters', default=[])
|
report_conflict('--remove-chapters', 'remove_chapters', default=[])
|
||||||
report_conflict('--remux-video', 'remuxvideo')
|
report_conflict('--remux-video', 'remuxvideo')
|
||||||
report_conflict('--sponskrub', 'sponskrub')
|
|
||||||
report_conflict('--sponsorblock-remove', 'sponsorblock_remove', default=set())
|
report_conflict('--sponsorblock-remove', 'sponsorblock_remove', default=set())
|
||||||
report_conflict('--xattrs', 'xattrs')
|
report_conflict('--xattrs', 'xattrs')
|
||||||
|
|
||||||
# Fully deprecated options
|
if hasattr(opts, '_deprecated_options'):
|
||||||
def report_deprecation(val, old, new=None):
|
|
||||||
if not val:
|
|
||||||
return
|
|
||||||
deprecation_warnings.append(
|
deprecation_warnings.append(
|
||||||
f'{old} is deprecated and may be removed in a future version. Use {new} instead' if new
|
f'The following options have been deprecated: {", ".join(opts._deprecated_options)}\n'
|
||||||
else f'{old} is deprecated and may not work as expected')
|
'Please remove them from your command/configuration to avoid future errors.\n'
|
||||||
|
'See https://github.com/yt-dlp/yt-dlp/issues/14198 for more details')
|
||||||
report_deprecation(opts.sponskrub, '--sponskrub', '--sponsorblock-mark or --sponsorblock-remove')
|
del opts._deprecated_options
|
||||||
report_deprecation(not opts.prefer_ffmpeg, '--prefer-avconv', 'ffmpeg')
|
|
||||||
# report_deprecation(opts.include_ads, '--include-ads') # We may re-implement this in future
|
|
||||||
# report_deprecation(opts.call_home, '--call-home') # We may re-implement this in future
|
|
||||||
# report_deprecation(opts.writeannotations, '--write-annotations') # It's just that no website has it
|
|
||||||
|
|
||||||
# Dependent options
|
# Dependent options
|
||||||
opts.date = DateRange.day(opts.date) if opts.date else DateRange(opts.dateafter, opts.datebefore)
|
opts.date = DateRange.day(opts.date) if opts.date else DateRange(opts.dateafter, opts.datebefore)
|
||||||
@@ -704,21 +696,6 @@ def get_postprocessors(opts):
|
|||||||
'add_metadata': opts.addmetadata,
|
'add_metadata': opts.addmetadata,
|
||||||
'add_infojson': opts.embed_infojson,
|
'add_infojson': opts.embed_infojson,
|
||||||
}
|
}
|
||||||
# Deprecated
|
|
||||||
# This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
|
|
||||||
# but must be below EmbedSubtitle and FFmpegMetadata
|
|
||||||
# See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
|
|
||||||
# If opts.sponskrub is None, sponskrub is used, but it silently fails if the executable can't be found
|
|
||||||
if opts.sponskrub is not False:
|
|
||||||
yield {
|
|
||||||
'key': 'SponSkrub',
|
|
||||||
'path': opts.sponskrub_path,
|
|
||||||
'args': opts.sponskrub_args,
|
|
||||||
'cut': opts.sponskrub_cut,
|
|
||||||
'force': opts.sponskrub_force,
|
|
||||||
'ignoreerror': opts.sponskrub is None,
|
|
||||||
'_from_cli': True,
|
|
||||||
}
|
|
||||||
if opts.embedthumbnail:
|
if opts.embedthumbnail:
|
||||||
yield {
|
yield {
|
||||||
'key': 'EmbedThumbnail',
|
'key': 'EmbedThumbnail',
|
||||||
@@ -877,7 +854,6 @@ def parse_options(argv=None):
|
|||||||
'nopart': opts.nopart,
|
'nopart': opts.nopart,
|
||||||
'updatetime': opts.updatetime,
|
'updatetime': opts.updatetime,
|
||||||
'writedescription': opts.writedescription,
|
'writedescription': opts.writedescription,
|
||||||
'writeannotations': opts.writeannotations,
|
|
||||||
'writeinfojson': opts.writeinfojson,
|
'writeinfojson': opts.writeinfojson,
|
||||||
'allow_playlist_files': opts.allow_playlist_files,
|
'allow_playlist_files': opts.allow_playlist_files,
|
||||||
'clean_infojson': opts.clean_infojson,
|
'clean_infojson': opts.clean_infojson,
|
||||||
@@ -911,7 +887,6 @@ def parse_options(argv=None):
|
|||||||
'max_views': opts.max_views,
|
'max_views': opts.max_views,
|
||||||
'daterange': opts.date,
|
'daterange': opts.date,
|
||||||
'cachedir': opts.cachedir,
|
'cachedir': opts.cachedir,
|
||||||
'youtube_print_sig_code': opts.youtube_print_sig_code,
|
|
||||||
'age_limit': opts.age_limit,
|
'age_limit': opts.age_limit,
|
||||||
'download_archive': opts.download_archive,
|
'download_archive': opts.download_archive,
|
||||||
'break_on_existing': opts.break_on_existing,
|
'break_on_existing': opts.break_on_existing,
|
||||||
@@ -929,13 +904,9 @@ def parse_options(argv=None):
|
|||||||
'socket_timeout': opts.socket_timeout,
|
'socket_timeout': opts.socket_timeout,
|
||||||
'bidi_workaround': opts.bidi_workaround,
|
'bidi_workaround': opts.bidi_workaround,
|
||||||
'debug_printtraffic': opts.debug_printtraffic,
|
'debug_printtraffic': opts.debug_printtraffic,
|
||||||
'prefer_ffmpeg': opts.prefer_ffmpeg,
|
|
||||||
'include_ads': opts.include_ads,
|
|
||||||
'default_search': opts.default_search,
|
'default_search': opts.default_search,
|
||||||
'dynamic_mpd': opts.dynamic_mpd,
|
'dynamic_mpd': opts.dynamic_mpd,
|
||||||
'extractor_args': opts.extractor_args,
|
'extractor_args': opts.extractor_args,
|
||||||
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
|
|
||||||
'youtube_include_hls_manifest': opts.youtube_include_hls_manifest,
|
|
||||||
'encoding': opts.encoding,
|
'encoding': opts.encoding,
|
||||||
'extract_flat': opts.extract_flat,
|
'extract_flat': opts.extract_flat,
|
||||||
'live_from_start': opts.live_from_start,
|
'live_from_start': opts.live_from_start,
|
||||||
@@ -947,7 +918,6 @@ def parse_options(argv=None):
|
|||||||
'fixup': opts.fixup,
|
'fixup': opts.fixup,
|
||||||
'source_address': opts.source_address,
|
'source_address': opts.source_address,
|
||||||
'impersonate': opts.impersonate,
|
'impersonate': opts.impersonate,
|
||||||
'call_home': opts.call_home,
|
|
||||||
'sleep_interval_requests': opts.sleep_interval_requests,
|
'sleep_interval_requests': opts.sleep_interval_requests,
|
||||||
'sleep_interval': opts.sleep_interval,
|
'sleep_interval': opts.sleep_interval,
|
||||||
'max_sleep_interval': opts.max_sleep_interval,
|
'max_sleep_interval': opts.max_sleep_interval,
|
||||||
@@ -957,7 +927,6 @@ def parse_options(argv=None):
|
|||||||
'force_keyframes_at_cuts': opts.force_keyframes_at_cuts,
|
'force_keyframes_at_cuts': opts.force_keyframes_at_cuts,
|
||||||
'list_thumbnails': opts.list_thumbnails,
|
'list_thumbnails': opts.list_thumbnails,
|
||||||
'playlist_items': opts.playlist_items,
|
'playlist_items': opts.playlist_items,
|
||||||
'xattr_set_filesize': opts.xattr_set_filesize,
|
|
||||||
'match_filter': opts.match_filter,
|
'match_filter': opts.match_filter,
|
||||||
'color': opts.color,
|
'color': opts.color,
|
||||||
'ffmpeg_location': opts.ffmpeg_location,
|
'ffmpeg_location': opts.ffmpeg_location,
|
||||||
@@ -966,11 +935,11 @@ def parse_options(argv=None):
|
|||||||
'hls_split_discontinuity': opts.hls_split_discontinuity,
|
'hls_split_discontinuity': opts.hls_split_discontinuity,
|
||||||
'external_downloader_args': opts.external_downloader_args,
|
'external_downloader_args': opts.external_downloader_args,
|
||||||
'postprocessor_args': opts.postprocessor_args,
|
'postprocessor_args': opts.postprocessor_args,
|
||||||
'cn_verification_proxy': opts.cn_verification_proxy,
|
|
||||||
'geo_verification_proxy': opts.geo_verification_proxy,
|
'geo_verification_proxy': opts.geo_verification_proxy,
|
||||||
'geo_bypass': opts.geo_bypass,
|
'geo_bypass': opts.geo_bypass,
|
||||||
'geo_bypass_country': opts.geo_bypass_country,
|
'geo_bypass_country': opts.geo_bypass_country,
|
||||||
'geo_bypass_ip_block': opts.geo_bypass_ip_block,
|
'geo_bypass_ip_block': opts.geo_bypass_ip_block,
|
||||||
|
'useid': opts.useid or None,
|
||||||
'warn_when_outdated': opts.update_self is None,
|
'warn_when_outdated': opts.update_self is None,
|
||||||
'_warnings': warnings,
|
'_warnings': warnings,
|
||||||
'_deprecation_warnings': deprecation_warnings,
|
'_deprecation_warnings': deprecation_warnings,
|
||||||
@@ -983,12 +952,6 @@ def _real_main(argv=None):
|
|||||||
|
|
||||||
parser, opts, all_urls, ydl_opts = parse_options(argv)
|
parser, opts, all_urls, ydl_opts = parse_options(argv)
|
||||||
|
|
||||||
# Dump user agent
|
|
||||||
if opts.dump_user_agent:
|
|
||||||
ua = traverse_obj(opts.headers, 'User-Agent', casesense=False, default=std_headers['User-Agent'])
|
|
||||||
write_string(f'{ua}\n', out=sys.stdout)
|
|
||||||
return
|
|
||||||
|
|
||||||
if print_extractor_information(opts, all_urls):
|
if print_extractor_information(opts, all_urls):
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
import datetime as dt
|
||||||
import os
|
import os
|
||||||
import xml.etree.ElementTree as etree
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
@@ -27,6 +28,13 @@ def compat_ord(c):
|
|||||||
return c if isinstance(c, int) else ord(c)
|
return c if isinstance(c, int) else ord(c)
|
||||||
|
|
||||||
|
|
||||||
|
def compat_datetime_from_timestamp(timestamp):
|
||||||
|
# Calling dt.datetime.fromtimestamp with negative timestamps throws error in Windows
|
||||||
|
# Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/81708,
|
||||||
|
# https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
|
||||||
|
return (dt.datetime.fromtimestamp(0, dt.timezone.utc) + dt.timedelta(seconds=timestamp))
|
||||||
|
|
||||||
|
|
||||||
# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
|
# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
|
||||||
# See https://github.com/yt-dlp/yt-dlp/issues/792
|
# See https://github.com/yt-dlp/yt-dlp/issues/792
|
||||||
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
|
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
|
||||||
|
|||||||
@@ -125,6 +125,8 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(),
|
|||||||
|
|
||||||
|
|
||||||
def _extract_firefox_cookies(profile, container, logger):
|
def _extract_firefox_cookies(profile, container, logger):
|
||||||
|
MAX_SUPPORTED_DB_SCHEMA_VERSION = 16
|
||||||
|
|
||||||
logger.info('Extracting cookies from firefox')
|
logger.info('Extracting cookies from firefox')
|
||||||
if not sqlite3:
|
if not sqlite3:
|
||||||
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
|
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
|
||||||
@@ -159,9 +161,11 @@ def _extract_firefox_cookies(profile, container, logger):
|
|||||||
raise ValueError(f'could not find firefox container "{container}" in containers.json')
|
raise ValueError(f'could not find firefox container "{container}" in containers.json')
|
||||||
|
|
||||||
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
|
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
|
||||||
cursor = None
|
cursor = _open_database_copy(cookie_database_path, tmpdir)
|
||||||
try:
|
with contextlib.closing(cursor.connection):
|
||||||
cursor = _open_database_copy(cookie_database_path, tmpdir)
|
db_schema_version = cursor.execute('PRAGMA user_version;').fetchone()[0]
|
||||||
|
if db_schema_version > MAX_SUPPORTED_DB_SCHEMA_VERSION:
|
||||||
|
logger.warning(f'Possibly unsupported firefox cookies database version: {db_schema_version}')
|
||||||
if isinstance(container_id, int):
|
if isinstance(container_id, int):
|
||||||
logger.debug(
|
logger.debug(
|
||||||
f'Only loading cookies from firefox container "{container}", ID {container_id}')
|
f'Only loading cookies from firefox container "{container}", ID {container_id}')
|
||||||
@@ -180,6 +184,10 @@ def _extract_firefox_cookies(profile, container, logger):
|
|||||||
total_cookie_count = len(table)
|
total_cookie_count = len(table)
|
||||||
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
|
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
|
||||||
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
|
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
|
||||||
|
# FF142 upgraded cookies DB to schema version 16 and started using milliseconds for cookie expiry
|
||||||
|
# Ref: https://github.com/mozilla-firefox/firefox/commit/5869af852cd20425165837f6c2d9971f3efba83d
|
||||||
|
if db_schema_version >= 16 and expiry is not None:
|
||||||
|
expiry /= 1000
|
||||||
cookie = http.cookiejar.Cookie(
|
cookie = http.cookiejar.Cookie(
|
||||||
version=0, name=name, value=value, port=None, port_specified=False,
|
version=0, name=name, value=value, port=None, port_specified=False,
|
||||||
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
|
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
|
||||||
@@ -188,9 +196,6 @@ def _extract_firefox_cookies(profile, container, logger):
|
|||||||
jar.set_cookie(cookie)
|
jar.set_cookie(cookie)
|
||||||
logger.info(f'Extracted {len(jar)} cookies from firefox')
|
logger.info(f'Extracted {len(jar)} cookies from firefox')
|
||||||
return jar
|
return jar
|
||||||
finally:
|
|
||||||
if cursor is not None:
|
|
||||||
cursor.connection.close()
|
|
||||||
|
|
||||||
|
|
||||||
def _firefox_browser_dirs():
|
def _firefox_browser_dirs():
|
||||||
|
|||||||
@@ -62,7 +62,6 @@ class FileDownloader:
|
|||||||
test: Download only first bytes to test the downloader.
|
test: Download only first bytes to test the downloader.
|
||||||
min_filesize: Skip files smaller than this size
|
min_filesize: Skip files smaller than this size
|
||||||
max_filesize: Skip files larger than this size
|
max_filesize: Skip files larger than this size
|
||||||
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
|
|
||||||
progress_delta: The minimum time between progress output, in seconds
|
progress_delta: The minimum time between progress output, in seconds
|
||||||
external_downloader_args: A dictionary of downloader keys (in lower case)
|
external_downloader_args: A dictionary of downloader keys (in lower case)
|
||||||
and a list of additional command-line arguments for the
|
and a list of additional command-line arguments for the
|
||||||
@@ -455,14 +454,26 @@ class FileDownloader:
|
|||||||
self._finish_multiline_status()
|
self._finish_multiline_status()
|
||||||
return True, False
|
return True, False
|
||||||
|
|
||||||
|
sleep_note = ''
|
||||||
if subtitle:
|
if subtitle:
|
||||||
sleep_interval = self.params.get('sleep_interval_subtitles') or 0
|
sleep_interval = self.params.get('sleep_interval_subtitles') or 0
|
||||||
else:
|
else:
|
||||||
min_sleep_interval = self.params.get('sleep_interval') or 0
|
min_sleep_interval = self.params.get('sleep_interval') or 0
|
||||||
|
max_sleep_interval = self.params.get('max_sleep_interval') or 0
|
||||||
|
|
||||||
|
if available_at := info_dict.get('available_at'):
|
||||||
|
forced_sleep_interval = available_at - int(time.time())
|
||||||
|
if forced_sleep_interval > min_sleep_interval:
|
||||||
|
sleep_note = 'as required by the site'
|
||||||
|
min_sleep_interval = forced_sleep_interval
|
||||||
|
if forced_sleep_interval > max_sleep_interval:
|
||||||
|
max_sleep_interval = forced_sleep_interval
|
||||||
|
|
||||||
sleep_interval = random.uniform(
|
sleep_interval = random.uniform(
|
||||||
min_sleep_interval, self.params.get('max_sleep_interval') or min_sleep_interval)
|
min_sleep_interval, max_sleep_interval or min_sleep_interval)
|
||||||
|
|
||||||
if sleep_interval > 0:
|
if sleep_interval > 0:
|
||||||
self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds ...')
|
self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds {sleep_note}...')
|
||||||
time.sleep(sleep_interval)
|
time.sleep(sleep_interval)
|
||||||
|
|
||||||
ret = self.real_download(filename, info_dict)
|
ret = self.real_download(filename, info_dict)
|
||||||
|
|||||||
@@ -563,7 +563,7 @@ class FFmpegFD(ExternalFD):
|
|||||||
f'{cookie.name}={cookie.value}; path={cookie.path}; domain={cookie.domain};\r\n'
|
f'{cookie.name}={cookie.value}; path={cookie.path}; domain={cookie.domain};\r\n'
|
||||||
for cookie in cookies)])
|
for cookie in cookies)])
|
||||||
if fmt.get('http_headers') and is_http:
|
if fmt.get('http_headers') and is_http:
|
||||||
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
|
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg:
|
||||||
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
||||||
args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in fmt['http_headers'].items())])
|
args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in fmt['http_headers'].items())])
|
||||||
|
|
||||||
@@ -654,10 +654,6 @@ class FFmpegFD(ExternalFD):
|
|||||||
return retval
|
return retval
|
||||||
|
|
||||||
|
|
||||||
class AVconvFD(FFmpegFD):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
_BY_NAME = {
|
_BY_NAME = {
|
||||||
klass.get_basename(): klass
|
klass.get_basename(): klass
|
||||||
for name, klass in globals().items()
|
for name, klass in globals().items()
|
||||||
|
|||||||
@@ -149,14 +149,14 @@ class FlvReader(io.BytesIO):
|
|||||||
segments_count = self.read_unsigned_char()
|
segments_count = self.read_unsigned_char()
|
||||||
segments = []
|
segments = []
|
||||||
for _ in range(segments_count):
|
for _ in range(segments_count):
|
||||||
box_size, box_type, box_data = self.read_box_info()
|
_box_size, box_type, box_data = self.read_box_info()
|
||||||
assert box_type == b'asrt'
|
assert box_type == b'asrt'
|
||||||
segment = FlvReader(box_data).read_asrt()
|
segment = FlvReader(box_data).read_asrt()
|
||||||
segments.append(segment)
|
segments.append(segment)
|
||||||
fragments_run_count = self.read_unsigned_char()
|
fragments_run_count = self.read_unsigned_char()
|
||||||
fragments = []
|
fragments = []
|
||||||
for _ in range(fragments_run_count):
|
for _ in range(fragments_run_count):
|
||||||
box_size, box_type, box_data = self.read_box_info()
|
_box_size, box_type, box_data = self.read_box_info()
|
||||||
assert box_type == b'afrt'
|
assert box_type == b'afrt'
|
||||||
fragments.append(FlvReader(box_data).read_afrt())
|
fragments.append(FlvReader(box_data).read_afrt())
|
||||||
|
|
||||||
@@ -167,7 +167,7 @@ class FlvReader(io.BytesIO):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def read_bootstrap_info(self):
|
def read_bootstrap_info(self):
|
||||||
total_size, box_type, box_data = self.read_box_info()
|
_, box_type, box_data = self.read_box_info()
|
||||||
assert box_type == b'abst'
|
assert box_type == b'abst'
|
||||||
return FlvReader(box_data).read_abst()
|
return FlvReader(box_data).read_abst()
|
||||||
|
|
||||||
@@ -324,9 +324,9 @@ class F4mFD(FragmentFD):
|
|||||||
if requested_bitrate is None or len(formats) == 1:
|
if requested_bitrate is None or len(formats) == 1:
|
||||||
# get the best format
|
# get the best format
|
||||||
formats = sorted(formats, key=lambda f: f[0])
|
formats = sorted(formats, key=lambda f: f[0])
|
||||||
rate, media = formats[-1]
|
_, media = formats[-1]
|
||||||
else:
|
else:
|
||||||
rate, media = next(filter(
|
_, media = next(filter(
|
||||||
lambda f: int(f[0]) == requested_bitrate, formats))
|
lambda f: int(f[0]) == requested_bitrate, formats))
|
||||||
|
|
||||||
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
|
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
|
||||||
|
|||||||
@@ -13,12 +13,9 @@ from ..utils import (
|
|||||||
ContentTooShortError,
|
ContentTooShortError,
|
||||||
RetryManager,
|
RetryManager,
|
||||||
ThrottledDownload,
|
ThrottledDownload,
|
||||||
XAttrMetadataError,
|
|
||||||
XAttrUnavailableError,
|
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_http_range,
|
parse_http_range,
|
||||||
try_call,
|
try_call,
|
||||||
write_xattr,
|
|
||||||
)
|
)
|
||||||
from ..utils.networking import HTTPHeaderDict
|
from ..utils.networking import HTTPHeaderDict
|
||||||
|
|
||||||
@@ -273,12 +270,6 @@ class HttpFD(FileDownloader):
|
|||||||
self.report_error(f'unable to open for writing: {err}')
|
self.report_error(f'unable to open for writing: {err}')
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if self.params.get('xattr_set_filesize', False) and data_len is not None:
|
|
||||||
try:
|
|
||||||
write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
|
|
||||||
except (XAttrUnavailableError, XAttrMetadataError) as err:
|
|
||||||
self.report_error(f'unable to set filesize xattr: {err}')
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
ctx.stream.write(data_block)
|
ctx.stream.write(data_block)
|
||||||
except OSError as err:
|
except OSError as err:
|
||||||
|
|||||||
@@ -58,13 +58,7 @@ from .adn import (
|
|||||||
ADNSeasonIE,
|
ADNSeasonIE,
|
||||||
)
|
)
|
||||||
from .adobeconnect import AdobeConnectIE
|
from .adobeconnect import AdobeConnectIE
|
||||||
from .adobetv import (
|
from .adobetv import AdobeTVVideoIE
|
||||||
AdobeTVChannelIE,
|
|
||||||
AdobeTVEmbedIE,
|
|
||||||
AdobeTVIE,
|
|
||||||
AdobeTVShowIE,
|
|
||||||
AdobeTVVideoIE,
|
|
||||||
)
|
|
||||||
from .adultswim import AdultSwimIE
|
from .adultswim import AdultSwimIE
|
||||||
from .aenetworks import (
|
from .aenetworks import (
|
||||||
AENetworksCollectionIE,
|
AENetworksCollectionIE,
|
||||||
@@ -152,7 +146,6 @@ from .ard import (
|
|||||||
ARDBetaMediathekIE,
|
ARDBetaMediathekIE,
|
||||||
ARDMediathekCollectionIE,
|
ARDMediathekCollectionIE,
|
||||||
)
|
)
|
||||||
from .arkena import ArkenaIE
|
|
||||||
from .arnes import ArnesIE
|
from .arnes import ArnesIE
|
||||||
from .art19 import (
|
from .art19 import (
|
||||||
Art19IE,
|
Art19IE,
|
||||||
@@ -405,16 +398,12 @@ from .cloudflarestream import CloudflareStreamIE
|
|||||||
from .cloudycdn import CloudyCDNIE
|
from .cloudycdn import CloudyCDNIE
|
||||||
from .clubic import ClubicIE
|
from .clubic import ClubicIE
|
||||||
from .clyp import ClypIE
|
from .clyp import ClypIE
|
||||||
from .cmt import CMTIE
|
|
||||||
from .cnbc import CNBCVideoIE
|
from .cnbc import CNBCVideoIE
|
||||||
from .cnn import (
|
from .cnn import (
|
||||||
CNNIE,
|
CNNIE,
|
||||||
CNNIndonesiaIE,
|
CNNIndonesiaIE,
|
||||||
)
|
)
|
||||||
from .comedycentral import (
|
from .comedycentral import ComedyCentralIE
|
||||||
ComedyCentralIE,
|
|
||||||
ComedyCentralTVIE,
|
|
||||||
)
|
|
||||||
from .commonmistakes import (
|
from .commonmistakes import (
|
||||||
BlobIE,
|
BlobIE,
|
||||||
CommonMistakesIE,
|
CommonMistakesIE,
|
||||||
@@ -435,7 +424,6 @@ from .cpac import (
|
|||||||
CPACPlaylistIE,
|
CPACPlaylistIE,
|
||||||
)
|
)
|
||||||
from .cracked import CrackedIE
|
from .cracked import CrackedIE
|
||||||
from .crackle import CrackleIE
|
|
||||||
from .craftsy import CraftsyIE
|
from .craftsy import CraftsyIE
|
||||||
from .crooksandliars import CrooksAndLiarsIE
|
from .crooksandliars import CrooksAndLiarsIE
|
||||||
from .crowdbunker import (
|
from .crowdbunker import (
|
||||||
@@ -455,10 +443,6 @@ from .curiositystream import (
|
|||||||
CuriosityStreamIE,
|
CuriosityStreamIE,
|
||||||
CuriosityStreamSeriesIE,
|
CuriosityStreamSeriesIE,
|
||||||
)
|
)
|
||||||
from .cwtv import (
|
|
||||||
CWTVIE,
|
|
||||||
CWTVMovieIE,
|
|
||||||
)
|
|
||||||
from .cybrary import (
|
from .cybrary import (
|
||||||
CybraryCourseIE,
|
CybraryCourseIE,
|
||||||
CybraryIE,
|
CybraryIE,
|
||||||
@@ -636,7 +620,10 @@ from .fancode import (
|
|||||||
FancodeVodIE,
|
FancodeVodIE,
|
||||||
)
|
)
|
||||||
from .fathom import FathomIE
|
from .fathom import FathomIE
|
||||||
from .faulio import FaulioLiveIE
|
from .faulio import (
|
||||||
|
FaulioIE,
|
||||||
|
FaulioLiveIE,
|
||||||
|
)
|
||||||
from .faz import FazIE
|
from .faz import FazIE
|
||||||
from .fc2 import (
|
from .fc2 import (
|
||||||
FC2IE,
|
FC2IE,
|
||||||
@@ -1149,7 +1136,6 @@ from .mit import (
|
|||||||
OCWMITIE,
|
OCWMITIE,
|
||||||
TechTVMITIE,
|
TechTVMITIE,
|
||||||
)
|
)
|
||||||
from .mitele import MiTeleIE
|
|
||||||
from .mixch import (
|
from .mixch import (
|
||||||
MixchArchiveIE,
|
MixchArchiveIE,
|
||||||
MixchIE,
|
MixchIE,
|
||||||
@@ -1187,15 +1173,7 @@ from .moview import MoviewPlayIE
|
|||||||
from .moviezine import MoviezineIE
|
from .moviezine import MoviezineIE
|
||||||
from .movingimage import MovingImageIE
|
from .movingimage import MovingImageIE
|
||||||
from .msn import MSNIE
|
from .msn import MSNIE
|
||||||
from .mtv import (
|
from .mtv import MTVIE
|
||||||
MTVDEIE,
|
|
||||||
MTVIE,
|
|
||||||
MTVItaliaIE,
|
|
||||||
MTVItaliaProgrammaIE,
|
|
||||||
MTVJapanIE,
|
|
||||||
MTVServicesEmbeddedIE,
|
|
||||||
MTVVideoIE,
|
|
||||||
)
|
|
||||||
from .muenchentv import MuenchenTVIE
|
from .muenchentv import MuenchenTVIE
|
||||||
from .murrtube import (
|
from .murrtube import (
|
||||||
MurrtubeIE,
|
MurrtubeIE,
|
||||||
@@ -1337,12 +1315,7 @@ from .nhk import (
|
|||||||
NhkVodProgramIE,
|
NhkVodProgramIE,
|
||||||
)
|
)
|
||||||
from .nhl import NHLIE
|
from .nhl import NHLIE
|
||||||
from .nick import (
|
from .nick import NickIE
|
||||||
NickBrIE,
|
|
||||||
NickDeIE,
|
|
||||||
NickIE,
|
|
||||||
NickRuIE,
|
|
||||||
)
|
|
||||||
from .niconico import (
|
from .niconico import (
|
||||||
NiconicoHistoryIE,
|
NiconicoHistoryIE,
|
||||||
NiconicoIE,
|
NiconicoIE,
|
||||||
@@ -1454,6 +1427,7 @@ from .onet import (
|
|||||||
OnetPlIE,
|
OnetPlIE,
|
||||||
)
|
)
|
||||||
from .onionstudios import OnionStudiosIE
|
from .onionstudios import OnionStudiosIE
|
||||||
|
from .onsen import OnsenIE
|
||||||
from .opencast import (
|
from .opencast import (
|
||||||
OpencastIE,
|
OpencastIE,
|
||||||
OpencastPlaylistIE,
|
OpencastPlaylistIE,
|
||||||
@@ -1487,10 +1461,6 @@ from .panopto import (
|
|||||||
PanoptoListIE,
|
PanoptoListIE,
|
||||||
PanoptoPlaylistIE,
|
PanoptoPlaylistIE,
|
||||||
)
|
)
|
||||||
from .paramountplus import (
|
|
||||||
ParamountPlusIE,
|
|
||||||
ParamountPlusSeriesIE,
|
|
||||||
)
|
|
||||||
from .parler import ParlerIE
|
from .parler import ParlerIE
|
||||||
from .parlview import ParlviewIE
|
from .parlview import ParlviewIE
|
||||||
from .parti import (
|
from .parti import (
|
||||||
@@ -1544,11 +1514,6 @@ from .piramidetv import (
|
|||||||
PiramideTVChannelIE,
|
PiramideTVChannelIE,
|
||||||
PiramideTVIE,
|
PiramideTVIE,
|
||||||
)
|
)
|
||||||
from .pixivsketch import (
|
|
||||||
PixivSketchIE,
|
|
||||||
PixivSketchUserIE,
|
|
||||||
)
|
|
||||||
from .pladform import PladformIE
|
|
||||||
from .planetmarathi import PlanetMarathiIE
|
from .planetmarathi import PlanetMarathiIE
|
||||||
from .platzi import (
|
from .platzi import (
|
||||||
PlatziCourseIE,
|
PlatziCourseIE,
|
||||||
@@ -1805,7 +1770,6 @@ from .rutube import (
|
|||||||
RutubePlaylistIE,
|
RutubePlaylistIE,
|
||||||
RutubeTagsIE,
|
RutubeTagsIE,
|
||||||
)
|
)
|
||||||
from .rutv import RUTVIE
|
|
||||||
from .ruutu import RuutuIE
|
from .ruutu import RuutuIE
|
||||||
from .ruv import (
|
from .ruv import (
|
||||||
RuvIE,
|
RuvIE,
|
||||||
@@ -1875,7 +1839,6 @@ from .simplecast import (
|
|||||||
SimplecastPodcastIE,
|
SimplecastPodcastIE,
|
||||||
)
|
)
|
||||||
from .sina import SinaIE
|
from .sina import SinaIE
|
||||||
from .sixplay import SixPlayIE
|
|
||||||
from .skeb import SkebIE
|
from .skeb import SkebIE
|
||||||
from .sky import (
|
from .sky import (
|
||||||
SkyNewsIE,
|
SkyNewsIE,
|
||||||
@@ -1903,7 +1866,12 @@ from .skynewsau import SkyNewsAUIE
|
|||||||
from .slideshare import SlideshareIE
|
from .slideshare import SlideshareIE
|
||||||
from .slideslive import SlidesLiveIE
|
from .slideslive import SlidesLiveIE
|
||||||
from .slutload import SlutloadIE
|
from .slutload import SlutloadIE
|
||||||
from .smotrim import SmotrimIE
|
from .smotrim import (
|
||||||
|
SmotrimAudioIE,
|
||||||
|
SmotrimIE,
|
||||||
|
SmotrimLiveIE,
|
||||||
|
SmotrimPlaylistIE,
|
||||||
|
)
|
||||||
from .snapchat import SnapchatSpotlightIE
|
from .snapchat import SnapchatSpotlightIE
|
||||||
from .snotr import SnotrIE
|
from .snotr import SnotrIE
|
||||||
from .softwhiteunderbelly import SoftWhiteUnderbellyIE
|
from .softwhiteunderbelly import SoftWhiteUnderbellyIE
|
||||||
@@ -1931,12 +1899,13 @@ from .soundgasm import (
|
|||||||
SoundgasmProfileIE,
|
SoundgasmProfileIE,
|
||||||
)
|
)
|
||||||
from .southpark import (
|
from .southpark import (
|
||||||
|
SouthParkComBrIE,
|
||||||
|
SouthParkCoUkIE,
|
||||||
SouthParkDeIE,
|
SouthParkDeIE,
|
||||||
SouthParkDkIE,
|
SouthParkDkIE,
|
||||||
SouthParkEsIE,
|
SouthParkEsIE,
|
||||||
SouthParkIE,
|
SouthParkIE,
|
||||||
SouthParkLatIE,
|
SouthParkLatIE,
|
||||||
SouthParkNlIE,
|
|
||||||
)
|
)
|
||||||
from .sovietscloset import (
|
from .sovietscloset import (
|
||||||
SovietsClosetIE,
|
SovietsClosetIE,
|
||||||
@@ -1947,17 +1916,9 @@ from .spankbang import (
|
|||||||
SpankBangPlaylistIE,
|
SpankBangPlaylistIE,
|
||||||
)
|
)
|
||||||
from .spiegel import SpiegelIE
|
from .spiegel import SpiegelIE
|
||||||
from .spike import (
|
|
||||||
BellatorIE,
|
|
||||||
ParamountNetworkIE,
|
|
||||||
)
|
|
||||||
from .sport5 import Sport5IE
|
from .sport5 import Sport5IE
|
||||||
from .sportbox import SportBoxIE
|
from .sportbox import SportBoxIE
|
||||||
from .sportdeutschland import SportDeutschlandIE
|
from .sportdeutschland import SportDeutschlandIE
|
||||||
from .spotify import (
|
|
||||||
SpotifyIE,
|
|
||||||
SpotifyShowIE,
|
|
||||||
)
|
|
||||||
from .spreaker import (
|
from .spreaker import (
|
||||||
SpreakerIE,
|
SpreakerIE,
|
||||||
SpreakerShowIE,
|
SpreakerShowIE,
|
||||||
@@ -1984,6 +1945,7 @@ from .startrek import StarTrekIE
|
|||||||
from .startv import StarTVIE
|
from .startv import StarTVIE
|
||||||
from .steam import (
|
from .steam import (
|
||||||
SteamCommunityBroadcastIE,
|
SteamCommunityBroadcastIE,
|
||||||
|
SteamCommunityIE,
|
||||||
SteamIE,
|
SteamIE,
|
||||||
)
|
)
|
||||||
from .stitcher import (
|
from .stitcher import (
|
||||||
@@ -2177,6 +2139,7 @@ from .tubitv import (
|
|||||||
)
|
)
|
||||||
from .tumblr import TumblrIE
|
from .tumblr import TumblrIE
|
||||||
from .tunein import (
|
from .tunein import (
|
||||||
|
TuneInEmbedIE,
|
||||||
TuneInPodcastEpisodeIE,
|
TuneInPodcastEpisodeIE,
|
||||||
TuneInPodcastIE,
|
TuneInPodcastIE,
|
||||||
TuneInShortenerIE,
|
TuneInShortenerIE,
|
||||||
@@ -2215,7 +2178,6 @@ from .tvc import (
|
|||||||
from .tver import TVerIE
|
from .tver import TVerIE
|
||||||
from .tvigle import TvigleIE
|
from .tvigle import TvigleIE
|
||||||
from .tviplayer import TVIPlayerIE
|
from .tviplayer import TVIPlayerIE
|
||||||
from .tvland import TVLandIE
|
|
||||||
from .tvn24 import TVN24IE
|
from .tvn24 import TVN24IE
|
||||||
from .tvnoe import TVNoeIE
|
from .tvnoe import TVNoeIE
|
||||||
from .tvopengr import (
|
from .tvopengr import (
|
||||||
@@ -2312,7 +2274,6 @@ from .utreon import UtreonIE
|
|||||||
from .varzesh3 import Varzesh3IE
|
from .varzesh3 import Varzesh3IE
|
||||||
from .vbox7 import Vbox7IE
|
from .vbox7 import Vbox7IE
|
||||||
from .veo import VeoIE
|
from .veo import VeoIE
|
||||||
from .vesti import VestiIE
|
|
||||||
from .vevo import (
|
from .vevo import (
|
||||||
VevoIE,
|
VevoIE,
|
||||||
VevoPlaylistIE,
|
VevoPlaylistIE,
|
||||||
@@ -2501,7 +2462,6 @@ from .wykop import (
|
|||||||
WykopPostCommentIE,
|
WykopPostCommentIE,
|
||||||
WykopPostIE,
|
WykopPostIE,
|
||||||
)
|
)
|
||||||
from .xanimu import XanimuIE
|
|
||||||
from .xboxclips import XboxClipsIE
|
from .xboxclips import XboxClipsIE
|
||||||
from .xhamster import (
|
from .xhamster import (
|
||||||
XHamsterEmbedIE,
|
XHamsterEmbedIE,
|
||||||
|
|||||||
@@ -1,297 +1,100 @@
|
|||||||
import functools
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ISO639Utils,
|
ISO639Utils,
|
||||||
OnDemandPagedList,
|
clean_html,
|
||||||
|
determine_ext,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
join_nonempty,
|
join_nonempty,
|
||||||
parse_duration,
|
url_or_none,
|
||||||
str_or_none,
|
|
||||||
str_to_int,
|
|
||||||
unified_strdate,
|
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVBaseIE(InfoExtractor):
|
class AdobeTVVideoIE(InfoExtractor):
|
||||||
def _call_api(self, path, video_id, query, note=None):
|
|
||||||
return self._download_json(
|
|
||||||
'http://tv.adobe.com/api/v4/' + path,
|
|
||||||
video_id, note, query=query)['data']
|
|
||||||
|
|
||||||
def _parse_subtitles(self, video_data, url_key):
|
|
||||||
subtitles = {}
|
|
||||||
for translation in video_data.get('translations', []):
|
|
||||||
vtt_path = translation.get(url_key)
|
|
||||||
if not vtt_path:
|
|
||||||
continue
|
|
||||||
lang = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
|
|
||||||
subtitles.setdefault(lang, []).append({
|
|
||||||
'ext': 'vtt',
|
|
||||||
'url': vtt_path,
|
|
||||||
})
|
|
||||||
return subtitles
|
|
||||||
|
|
||||||
def _parse_video_data(self, video_data):
|
|
||||||
video_id = str(video_data['id'])
|
|
||||||
title = video_data['title']
|
|
||||||
|
|
||||||
s3_extracted = False
|
|
||||||
formats = []
|
|
||||||
for source in video_data.get('videos', []):
|
|
||||||
source_url = source.get('url')
|
|
||||||
if not source_url:
|
|
||||||
continue
|
|
||||||
f = {
|
|
||||||
'format_id': source.get('quality_level'),
|
|
||||||
'fps': int_or_none(source.get('frame_rate')),
|
|
||||||
'height': int_or_none(source.get('height')),
|
|
||||||
'tbr': int_or_none(source.get('video_data_rate')),
|
|
||||||
'width': int_or_none(source.get('width')),
|
|
||||||
'url': source_url,
|
|
||||||
}
|
|
||||||
original_filename = source.get('original_filename')
|
|
||||||
if original_filename:
|
|
||||||
if not (f.get('height') and f.get('width')):
|
|
||||||
mobj = re.search(r'_(\d+)x(\d+)', original_filename)
|
|
||||||
if mobj:
|
|
||||||
f.update({
|
|
||||||
'height': int(mobj.group(2)),
|
|
||||||
'width': int(mobj.group(1)),
|
|
||||||
})
|
|
||||||
if original_filename.startswith('s3://') and not s3_extracted:
|
|
||||||
formats.append({
|
|
||||||
'format_id': 'original',
|
|
||||||
'quality': 1,
|
|
||||||
'url': original_filename.replace('s3://', 'https://s3.amazonaws.com/'),
|
|
||||||
})
|
|
||||||
s3_extracted = True
|
|
||||||
formats.append(f)
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'description': video_data.get('description'),
|
|
||||||
'thumbnail': video_data.get('thumbnail'),
|
|
||||||
'upload_date': unified_strdate(video_data.get('start_date')),
|
|
||||||
'duration': parse_duration(video_data.get('duration')),
|
|
||||||
'view_count': str_to_int(video_data.get('playcount')),
|
|
||||||
'formats': formats,
|
|
||||||
'subtitles': self._parse_subtitles(video_data, 'vtt'),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVEmbedIE(AdobeTVBaseIE):
|
|
||||||
_WORKING = False
|
|
||||||
IE_NAME = 'adobetv:embed'
|
|
||||||
_VALID_URL = r'https?://tv\.adobe\.com/embed/\d+/(?P<id>\d+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://tv.adobe.com/embed/22/4153',
|
|
||||||
'md5': 'c8c0461bf04d54574fc2b4d07ac6783a',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '4153',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Creating Graphics Optimized for BlackBerry',
|
|
||||||
'description': 'md5:eac6e8dced38bdaae51cd94447927459',
|
|
||||||
'thumbnail': r're:https?://.+\.jpg',
|
|
||||||
'upload_date': '20091109',
|
|
||||||
'duration': 377,
|
|
||||||
'view_count': int,
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
|
|
||||||
video_data = self._call_api(
|
|
||||||
'episode/' + video_id, video_id, {'disclosure': 'standard'})[0]
|
|
||||||
return self._parse_video_data(video_data)
|
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVIE(AdobeTVBaseIE):
|
|
||||||
_WORKING = False
|
|
||||||
IE_NAME = 'adobetv'
|
IE_NAME = 'adobetv'
|
||||||
_VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
|
|
||||||
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '10981',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
|
|
||||||
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
|
|
||||||
'thumbnail': r're:https?://.+\.jpg',
|
|
||||||
'upload_date': '20110914',
|
|
||||||
'duration': 60,
|
|
||||||
'view_count': int,
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
language, show_urlname, urlname = self._match_valid_url(url).groups()
|
|
||||||
if not language:
|
|
||||||
language = 'en'
|
|
||||||
|
|
||||||
video_data = self._call_api(
|
|
||||||
'episode/get', urlname, {
|
|
||||||
'disclosure': 'standard',
|
|
||||||
'language': language,
|
|
||||||
'show_urlname': show_urlname,
|
|
||||||
'urlname': urlname,
|
|
||||||
})[0]
|
|
||||||
return self._parse_video_data(video_data)
|
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVPlaylistBaseIE(AdobeTVBaseIE):
|
|
||||||
_PAGE_SIZE = 25
|
|
||||||
|
|
||||||
def _fetch_page(self, display_id, query, page):
|
|
||||||
page += 1
|
|
||||||
query['page'] = page
|
|
||||||
for element_data in self._call_api(
|
|
||||||
self._RESOURCE, display_id, query, f'Download Page {page}'):
|
|
||||||
yield self._process_data(element_data)
|
|
||||||
|
|
||||||
def _extract_playlist_entries(self, display_id, query):
|
|
||||||
return OnDemandPagedList(functools.partial(
|
|
||||||
self._fetch_page, display_id, query), self._PAGE_SIZE)
|
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
|
|
||||||
_WORKING = False
|
|
||||||
IE_NAME = 'adobetv:show'
|
|
||||||
_VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '36',
|
|
||||||
'title': 'The Complete Picture with Julieanne Kost',
|
|
||||||
'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27',
|
|
||||||
},
|
|
||||||
'playlist_mincount': 136,
|
|
||||||
}]
|
|
||||||
_RESOURCE = 'episode'
|
|
||||||
_process_data = AdobeTVBaseIE._parse_video_data
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
language, show_urlname = self._match_valid_url(url).groups()
|
|
||||||
if not language:
|
|
||||||
language = 'en'
|
|
||||||
query = {
|
|
||||||
'disclosure': 'standard',
|
|
||||||
'language': language,
|
|
||||||
'show_urlname': show_urlname,
|
|
||||||
}
|
|
||||||
|
|
||||||
show_data = self._call_api(
|
|
||||||
'show/get', show_urlname, query)[0]
|
|
||||||
|
|
||||||
return self.playlist_result(
|
|
||||||
self._extract_playlist_entries(show_urlname, query),
|
|
||||||
str_or_none(show_data.get('id')),
|
|
||||||
show_data.get('show_name'),
|
|
||||||
show_data.get('show_description'))
|
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
|
|
||||||
_WORKING = False
|
|
||||||
IE_NAME = 'adobetv:channel'
|
|
||||||
_VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://tv.adobe.com/channel/development',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'development',
|
|
||||||
},
|
|
||||||
'playlist_mincount': 96,
|
|
||||||
}]
|
|
||||||
_RESOURCE = 'show'
|
|
||||||
|
|
||||||
def _process_data(self, show_data):
|
|
||||||
return self.url_result(
|
|
||||||
show_data['url'], 'AdobeTVShow', str_or_none(show_data.get('id')))
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
language, channel_urlname, category_urlname = self._match_valid_url(url).groups()
|
|
||||||
if not language:
|
|
||||||
language = 'en'
|
|
||||||
query = {
|
|
||||||
'channel_urlname': channel_urlname,
|
|
||||||
'language': language,
|
|
||||||
}
|
|
||||||
if category_urlname:
|
|
||||||
query['category_urlname'] = category_urlname
|
|
||||||
|
|
||||||
return self.playlist_result(
|
|
||||||
self._extract_playlist_entries(channel_urlname, query),
|
|
||||||
channel_urlname)
|
|
||||||
|
|
||||||
|
|
||||||
class AdobeTVVideoIE(AdobeTVBaseIE):
|
|
||||||
IE_NAME = 'adobetv:video'
|
|
||||||
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
|
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
|
||||||
_EMBED_REGEX = [r'<iframe[^>]+src=[\'"](?P<url>(?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]']
|
_EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>(?:https?:)?//video\.tv\.adobe\.com/v/\d+)']
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
|
'url': 'https://video.tv.adobe.com/v/2456',
|
||||||
'url': 'https://video.tv.adobe.com/v/2456/',
|
|
||||||
'md5': '43662b577c018ad707a63766462b1e87',
|
'md5': '43662b577c018ad707a63766462b1e87',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2456',
|
'id': '2456',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'New experience with Acrobat DC',
|
'title': 'New experience with Acrobat DC',
|
||||||
'description': 'New experience with Acrobat DC',
|
'description': 'New experience with Acrobat DC',
|
||||||
'duration': 248.667,
|
'duration': 248.522,
|
||||||
|
'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://video.tv.adobe.com/v/3463980/adobe-acrobat',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3463980',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Adobe Acrobat: How to Customize the Toolbar for Faster PDF Editing',
|
||||||
|
'description': 'md5:94368ab95ae24f9c1bee0cb346e03dc3',
|
||||||
|
'duration': 97.514,
|
||||||
'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
|
'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
|
||||||
},
|
},
|
||||||
}]
|
}]
|
||||||
_WEBPAGE_TESTS = [{
|
_WEBPAGE_TESTS = [{
|
||||||
# FIXME: Invalid extension
|
# https://video.tv.adobe.com/v/3442499
|
||||||
'url': 'https://www.adobe.com/learn/acrobat/web/customize-toolbar',
|
'url': 'https://business.adobe.com/dx-fragments/summit/2025/marquees/S335/ondemand.live.html',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '3463980',
|
'id': '3442499',
|
||||||
'ext': 'm3u8',
|
'ext': 'mp4',
|
||||||
'title': 'Adobe Acrobat: How to Customize the Toolbar for Faster PDF Editing',
|
'title': 'S335 - Beyond Personalization: Creating Intent-Based Experiences at Scale',
|
||||||
'description': 'md5:94368ab95ae24f9c1bee0cb346e03dc3',
|
'description': 'Beyond Personalization: Creating Intent-Based Experiences at Scale',
|
||||||
'duration': 97.557,
|
'duration': 2906.8,
|
||||||
|
'thumbnail': r're:https?://images-tv\.adobe\.com/.+\.jpg',
|
||||||
},
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
video_data = self._search_json(
|
||||||
video_data = self._parse_json(self._search_regex(
|
r'var\s+bridge\s*=', webpage, 'bridged data', video_id)
|
||||||
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'bridged data'), video_id)
|
|
||||||
title = video_data['title']
|
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
sources = video_data.get('sources') or []
|
for source in traverse_obj(video_data, (
|
||||||
for source in sources:
|
'sources', lambda _, v: v['format'] != 'playlist' and url_or_none(v['src']),
|
||||||
source_src = source.get('src')
|
)):
|
||||||
if not source_src:
|
source_url = self._proto_relative_url(source['src'])
|
||||||
continue
|
if determine_ext(source_url) == 'm3u8':
|
||||||
formats.append({
|
fmts = self._extract_m3u8_formats(
|
||||||
'filesize': int_or_none(source.get('kilobytes') or None, invscale=1000),
|
source_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
|
||||||
'format_id': join_nonempty(source.get('format'), source.get('label')),
|
else:
|
||||||
'height': int_or_none(source.get('height') or None),
|
fmts = [{'url': source_url}]
|
||||||
'tbr': int_or_none(source.get('bitrate') or None),
|
|
||||||
'width': int_or_none(source.get('width') or None),
|
|
||||||
'url': source_src,
|
|
||||||
})
|
|
||||||
|
|
||||||
# For both metadata and downloaded files the duration varies among
|
for fmt in fmts:
|
||||||
# formats. I just pick the max one
|
fmt.update(traverse_obj(source, {
|
||||||
duration = max(filter(None, [
|
'duration': ('duration', {float_or_none(scale=1000)}),
|
||||||
float_or_none(source.get('duration'), scale=1000)
|
'filesize': ('kilobytes', {float_or_none(invscale=1000)}),
|
||||||
for source in sources]))
|
'format_id': (('format', 'label'), {str}, all, {lambda x: join_nonempty(*x)}),
|
||||||
|
'height': ('height', {int_or_none}),
|
||||||
|
'tbr': ('bitrate', {int_or_none}),
|
||||||
|
'width': ('width', {int_or_none}),
|
||||||
|
}))
|
||||||
|
formats.extend(fmts)
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
for translation in traverse_obj(video_data, (
|
||||||
|
'translations', lambda _, v: url_or_none(v['vttPath']),
|
||||||
|
)):
|
||||||
|
lang = translation.get('language_w3c') or ISO639Utils.long2short(translation.get('language_medium')) or 'und'
|
||||||
|
subtitles.setdefault(lang, []).append({
|
||||||
|
'ext': 'vtt',
|
||||||
|
'url': self._proto_relative_url(translation['vttPath']),
|
||||||
|
})
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'title': title,
|
'subtitles': subtitles,
|
||||||
'description': video_data.get('description'),
|
**traverse_obj(video_data, {
|
||||||
'thumbnail': video_data.get('video', {}).get('poster'),
|
'title': ('title', {clean_html}),
|
||||||
'duration': duration,
|
'description': ('description', {clean_html}, filter),
|
||||||
'subtitles': self._parse_subtitles(video_data, 'vttPath'),
|
'thumbnail': ('video', 'poster', {self._proto_relative_url}, {url_or_none}),
|
||||||
|
}),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
clean_html,
|
||||||
clean_podcast_url,
|
clean_podcast_url,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
@@ -17,7 +18,7 @@ class ApplePodcastsIE(InfoExtractor):
|
|||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'title': 'Ferreck Dawn - To The Break of Dawn 117',
|
'title': 'Ferreck Dawn - To The Break of Dawn 117',
|
||||||
'episode': 'Ferreck Dawn - To The Break of Dawn 117',
|
'episode': 'Ferreck Dawn - To The Break of Dawn 117',
|
||||||
'description': 'md5:1fc571102f79dbd0a77bfd71ffda23bc',
|
'description': 'md5:8c4f5c2c30af17ed6a98b0b9daf15b76',
|
||||||
'upload_date': '20240812',
|
'upload_date': '20240812',
|
||||||
'timestamp': 1723449600,
|
'timestamp': 1723449600,
|
||||||
'duration': 3596,
|
'duration': 3596,
|
||||||
@@ -58,7 +59,7 @@ class ApplePodcastsIE(InfoExtractor):
|
|||||||
r'<script [^>]*\bid=["\']serialized-server-data["\'][^>]*>', webpage,
|
r'<script [^>]*\bid=["\']serialized-server-data["\'][^>]*>', webpage,
|
||||||
'server data', episode_id, contains_pattern=r'\[{(?s:.+)}\]')[0]['data']
|
'server data', episode_id, contains_pattern=r'\[{(?s:.+)}\]')[0]['data']
|
||||||
model_data = traverse_obj(server_data, (
|
model_data = traverse_obj(server_data, (
|
||||||
'headerButtonItems', lambda _, v: v['$kind'] == 'bookmark' and v['modelType'] == 'EpisodeOffer',
|
'headerButtonItems', lambda _, v: v['$kind'] == 'share' and v['modelType'] == 'EpisodeLockup',
|
||||||
'model', {dict}, any))
|
'model', {dict}, any))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
@@ -68,7 +69,8 @@ class ApplePodcastsIE(InfoExtractor):
|
|||||||
or self._yield_json_ld(webpage, episode_id, fatal=False), episode_id, fatal=False),
|
or self._yield_json_ld(webpage, episode_id, fatal=False), episode_id, fatal=False),
|
||||||
**traverse_obj(model_data, {
|
**traverse_obj(model_data, {
|
||||||
'title': ('title', {str}),
|
'title': ('title', {str}),
|
||||||
'url': ('streamUrl', {clean_podcast_url}),
|
'description': ('summary', {clean_html}),
|
||||||
|
'url': ('playAction', 'episodeOffer', 'streamUrl', {clean_podcast_url}),
|
||||||
'timestamp': ('releaseDate', {parse_iso8601}),
|
'timestamp': ('releaseDate', {parse_iso8601}),
|
||||||
'duration': ('duration', {int_or_none}),
|
'duration': ('duration', {int_or_none}),
|
||||||
}),
|
}),
|
||||||
|
|||||||
@@ -1,150 +0,0 @@
|
|||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
ExtractorError,
|
|
||||||
float_or_none,
|
|
||||||
int_or_none,
|
|
||||||
parse_iso8601,
|
|
||||||
parse_qs,
|
|
||||||
try_get,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class ArkenaIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'''(?x)
|
|
||||||
https?://
|
|
||||||
(?:
|
|
||||||
video\.(?:arkena|qbrick)\.com/play2/embed/player\?|
|
|
||||||
play\.arkena\.com/(?:config|embed)/avp/v\d/player/media/(?P<id>[^/]+)/[^/]+/(?P<account_id>\d+)
|
|
||||||
)
|
|
||||||
'''
|
|
||||||
# See https://support.arkena.com/display/PLAY/Ways+to+embed+your+video
|
|
||||||
_EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//play\.arkena\.com/embed/avp/.+?)\1']
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://video.qbrick.com/play2/embed/player?accountId=1034090&mediaId=d8ab4607-00090107-aab86310',
|
|
||||||
'md5': '97f117754e5f3c020f5f26da4a44ebaf',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'd8ab4607-00090107-aab86310',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'EM_HT20_117_roslund_v2.mp4',
|
|
||||||
'timestamp': 1608285912,
|
|
||||||
'upload_date': '20201218',
|
|
||||||
'duration': 1429.162667,
|
|
||||||
'subtitles': {
|
|
||||||
'sv': 'count:3',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'https://play.arkena.com/config/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411/?callbackMethod=jQuery1111023664739129262213_1469227693893',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://play.arkena.com/config/avp/v1/player/media/327336/darkmatter/131064/?callbackMethod=jQuery1111002221189684892677_1469227595972',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://play.arkena.com/embed/avp/v1/player/media/327336/darkmatter/131064/',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://video.arkena.com/play2/embed/player?accountId=472718&mediaId=35763b3b-00090078-bf604299&pageStyling=styled',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
mobj = self._match_valid_url(url)
|
|
||||||
video_id = mobj.group('id')
|
|
||||||
account_id = mobj.group('account_id')
|
|
||||||
|
|
||||||
# Handle http://video.arkena.com/play2/embed/player URL
|
|
||||||
if not video_id:
|
|
||||||
qs = parse_qs(url)
|
|
||||||
video_id = qs.get('mediaId', [None])[0]
|
|
||||||
account_id = qs.get('accountId', [None])[0]
|
|
||||||
if not video_id or not account_id:
|
|
||||||
raise ExtractorError('Invalid URL', expected=True)
|
|
||||||
|
|
||||||
media = self._download_json(
|
|
||||||
f'https://video.qbrick.com/api/v1/public/accounts/{account_id}/medias/{video_id}',
|
|
||||||
video_id, query={
|
|
||||||
# https://video.qbrick.com/docs/api/examples/library-api.html
|
|
||||||
'fields': 'asset/resources/*/renditions/*(height,id,language,links/*(href,mimeType),type,size,videos/*(audios/*(codec,sampleRate),bitrate,codec,duration,height,width),width),created,metadata/*(title,description),tags',
|
|
||||||
})
|
|
||||||
metadata = media.get('metadata') or {}
|
|
||||||
title = metadata['title']
|
|
||||||
|
|
||||||
duration = None
|
|
||||||
formats = []
|
|
||||||
thumbnails = []
|
|
||||||
subtitles = {}
|
|
||||||
for resource in media['asset']['resources']:
|
|
||||||
for rendition in (resource.get('renditions') or []):
|
|
||||||
rendition_type = rendition.get('type')
|
|
||||||
for i, link in enumerate(rendition.get('links') or []):
|
|
||||||
href = link.get('href')
|
|
||||||
if not href:
|
|
||||||
continue
|
|
||||||
if rendition_type == 'image':
|
|
||||||
thumbnails.append({
|
|
||||||
'filesize': int_or_none(rendition.get('size')),
|
|
||||||
'height': int_or_none(rendition.get('height')),
|
|
||||||
'id': rendition.get('id'),
|
|
||||||
'url': href,
|
|
||||||
'width': int_or_none(rendition.get('width')),
|
|
||||||
})
|
|
||||||
elif rendition_type == 'subtitle':
|
|
||||||
subtitles.setdefault(rendition.get('language') or 'en', []).append({
|
|
||||||
'url': href,
|
|
||||||
})
|
|
||||||
elif rendition_type == 'video':
|
|
||||||
f = {
|
|
||||||
'filesize': int_or_none(rendition.get('size')),
|
|
||||||
'format_id': rendition.get('id'),
|
|
||||||
'url': href,
|
|
||||||
}
|
|
||||||
video = try_get(rendition, lambda x: x['videos'][i], dict)
|
|
||||||
if video:
|
|
||||||
if not duration:
|
|
||||||
duration = float_or_none(video.get('duration'))
|
|
||||||
f.update({
|
|
||||||
'height': int_or_none(video.get('height')),
|
|
||||||
'tbr': int_or_none(video.get('bitrate'), 1000),
|
|
||||||
'vcodec': video.get('codec'),
|
|
||||||
'width': int_or_none(video.get('width')),
|
|
||||||
})
|
|
||||||
audio = try_get(video, lambda x: x['audios'][0], dict)
|
|
||||||
if audio:
|
|
||||||
f.update({
|
|
||||||
'acodec': audio.get('codec'),
|
|
||||||
'asr': int_or_none(audio.get('sampleRate')),
|
|
||||||
})
|
|
||||||
formats.append(f)
|
|
||||||
elif rendition_type == 'index':
|
|
||||||
mime_type = link.get('mimeType')
|
|
||||||
if mime_type == 'application/smil+xml':
|
|
||||||
formats.extend(self._extract_smil_formats(
|
|
||||||
href, video_id, fatal=False))
|
|
||||||
elif mime_type == 'application/x-mpegURL':
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
href, video_id, 'mp4', 'm3u8_native',
|
|
||||||
m3u8_id='hls', fatal=False))
|
|
||||||
elif mime_type == 'application/hds+xml':
|
|
||||||
formats.extend(self._extract_f4m_formats(
|
|
||||||
href, video_id, f4m_id='hds', fatal=False))
|
|
||||||
elif mime_type == 'application/dash+xml':
|
|
||||||
formats.extend(self._extract_mpd_formats(
|
|
||||||
href, video_id, mpd_id='dash', fatal=False))
|
|
||||||
elif mime_type == 'application/vnd.ms-sstr+xml':
|
|
||||||
formats.extend(self._extract_ism_formats(
|
|
||||||
href, video_id, ism_id='mss', fatal=False))
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'description': metadata.get('description'),
|
|
||||||
'timestamp': parse_iso8601(media.get('created')),
|
|
||||||
'thumbnails': thumbnails,
|
|
||||||
'subtitles': subtitles,
|
|
||||||
'duration': duration,
|
|
||||||
'tags': media.get('tags'),
|
|
||||||
'formats': formats,
|
|
||||||
}
|
|
||||||
@@ -4,7 +4,7 @@ from .common import InfoExtractor
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
jwt_encode_hs256,
|
jwt_encode,
|
||||||
try_get,
|
try_get,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -83,11 +83,10 @@ class ATVAtIE(InfoExtractor):
|
|||||||
'nbf': int(not_before.timestamp()),
|
'nbf': int(not_before.timestamp()),
|
||||||
'exp': int(expire.timestamp()),
|
'exp': int(expire.timestamp()),
|
||||||
}
|
}
|
||||||
jwt_token = jwt_encode_hs256(payload, self._ENCRYPTION_KEY, headers={'kid': self._ACCESS_ID})
|
|
||||||
videos = self._download_json(
|
videos = self._download_json(
|
||||||
'https://vas-v4.p7s1video.net/4.0/getsources',
|
'https://vas-v4.p7s1video.net/4.0/getsources',
|
||||||
content_id, 'Downloading videos JSON', query={
|
content_id, 'Downloading videos JSON', query={
|
||||||
'token': jwt_token.decode('utf-8'),
|
'token': jwt_encode(payload, self._ENCRYPTION_KEY, headers={'kid': self._ACCESS_ID}),
|
||||||
})
|
})
|
||||||
|
|
||||||
video_id, videos_data = next(iter(videos['data'].items()))
|
video_id, videos_data = next(iter(videos['data'].items()))
|
||||||
|
|||||||
@@ -1,79 +1,47 @@
|
|||||||
from .mtv import MTVServicesInfoExtractor
|
from .mtv import MTVServicesBaseIE
|
||||||
from ..utils import unified_strdate
|
|
||||||
|
|
||||||
|
|
||||||
class BetIE(MTVServicesInfoExtractor):
|
class BetIE(MTVServicesBaseIE):
|
||||||
_WORKING = False
|
_VALID_URL = r'https?://(?:www\.)?bet\.com/(?:video-clips|episodes)/(?P<id>[\da-z]{6})'
|
||||||
_VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
|
_TESTS = [{
|
||||||
_TESTS = [
|
'url': 'https://www.bet.com/video-clips/w9mk7v',
|
||||||
{
|
'info_dict': {
|
||||||
'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
|
'id': '3022d121-d191-43fd-b5fb-b2c26f335497',
|
||||||
'info_dict': {
|
'ext': 'mp4',
|
||||||
'id': '07e96bd3-8850-3051-b856-271b457f0ab8',
|
'display_id': 'w9mk7v',
|
||||||
'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
|
'title': 'New Normal',
|
||||||
'ext': 'flv',
|
'description': 'md5:d7898c124713b4646cecad9d16ff01f3',
|
||||||
'title': 'A Conversation With President Obama',
|
'duration': 30.08,
|
||||||
'description': 'President Obama urges persistence in confronting racism and bias.',
|
'series': 'Tyler Perry\'s Sistas',
|
||||||
'duration': 1534,
|
'season': 'Season 0',
|
||||||
'upload_date': '20141208',
|
'season_number': 0,
|
||||||
'thumbnail': r're:(?i)^https?://.*\.jpg$',
|
'episode': 'Episode 0',
|
||||||
'subtitles': {
|
'episode_number': 0,
|
||||||
'en': 'mincount:2',
|
'timestamp': 1755269073,
|
||||||
},
|
'upload_date': '20250815',
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
'params': {'skip_download': 'm3u8'},
|
||||||
'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
|
}, {
|
||||||
'info_dict': {
|
'url': 'https://www.bet.com/episodes/nmce72/tyler-perry-s-sistas-heavy-is-the-crown-season-9-ep-5',
|
||||||
'id': '9f516bf1-7543-39c4-8076-dd441b459ba9',
|
'info_dict': {
|
||||||
'display_id': 'justice-for-ferguson-a-community-reacts',
|
'id': '6427562b-3029-11f0-b405-16fff45bc035',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'Justice for Ferguson: A Community Reacts',
|
'display_id': 'nmce72',
|
||||||
'description': 'A BET News special.',
|
'title': 'Heavy Is the Crown',
|
||||||
'duration': 1696,
|
'description': 'md5:1ed345d3157a50572d2464afcc7a652a',
|
||||||
'upload_date': '20141125',
|
'channel': 'BET',
|
||||||
'thumbnail': r're:(?i)^https?://.*\.jpg$',
|
'duration': 2550.0,
|
||||||
'subtitles': {
|
'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref',
|
||||||
'en': 'mincount:2',
|
'series': 'Tyler Perry\'s Sistas',
|
||||||
},
|
'season': 'Season 9',
|
||||||
},
|
'season_number': 9,
|
||||||
'params': {
|
'episode': 'Episode 5',
|
||||||
# rtmp download
|
'episode_number': 5,
|
||||||
'skip_download': True,
|
'timestamp': 1755165600,
|
||||||
},
|
'upload_date': '20250814',
|
||||||
|
'release_timestamp': 1755129600,
|
||||||
|
'release_date': '20250814',
|
||||||
},
|
},
|
||||||
]
|
'params': {'skip_download': 'm3u8'},
|
||||||
|
'skip': 'Requires provider sign-in',
|
||||||
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/bet-mrss-player'
|
}]
|
||||||
|
|
||||||
def _get_feed_query(self, uri):
|
|
||||||
return {
|
|
||||||
'uuid': uri,
|
|
||||||
}
|
|
||||||
|
|
||||||
def _extract_mgid(self, webpage):
|
|
||||||
return self._search_regex(r'data-uri="([^"]+)', webpage, 'mgid')
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
display_id = self._match_id(url)
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
mgid = self._extract_mgid(webpage)
|
|
||||||
videos_info = self._get_videos_info(mgid)
|
|
||||||
|
|
||||||
info_dict = videos_info['entries'][0]
|
|
||||||
|
|
||||||
upload_date = unified_strdate(self._html_search_meta('date', webpage))
|
|
||||||
description = self._html_search_meta('description', webpage)
|
|
||||||
|
|
||||||
info_dict.update({
|
|
||||||
'display_id': display_id,
|
|
||||||
'description': description,
|
|
||||||
'upload_date': upload_date,
|
|
||||||
})
|
|
||||||
|
|
||||||
return info_dict
|
|
||||||
|
|||||||
@@ -304,7 +304,7 @@ class BilibiliBaseIE(InfoExtractor):
|
|||||||
|
|
||||||
|
|
||||||
class BiliBiliIE(BilibiliBaseIE):
|
class BiliBiliIE(BilibiliBaseIE):
|
||||||
_VALID_URL = r'https?://(?:www\.)?bilibili\.com/(?:video/|festival/[^/?#]+\?(?:[^#]*&)?bvid=)[aAbB][vV](?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?bilibili\.com/(?:video/|festival/[^/?#]+\?(?:[^#]*&)?bvid=)(?P<prefix>[aAbB][vV])(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.bilibili.com/video/BV13x41117TL',
|
'url': 'https://www.bilibili.com/video/BV13x41117TL',
|
||||||
@@ -563,7 +563,7 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||||||
},
|
},
|
||||||
}],
|
}],
|
||||||
}, {
|
}, {
|
||||||
'note': '301 redirect to bangumi link',
|
'note': 'redirect from bvid to bangumi link via redirect_url',
|
||||||
'url': 'https://www.bilibili.com/video/BV1TE411f7f1',
|
'url': 'https://www.bilibili.com/video/BV1TE411f7f1',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '288525',
|
'id': '288525',
|
||||||
@@ -580,7 +580,27 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||||||
'duration': 1183.957,
|
'duration': 1183.957,
|
||||||
'timestamp': 1571648124,
|
'timestamp': 1571648124,
|
||||||
'upload_date': '20191021',
|
'upload_date': '20191021',
|
||||||
'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$',
|
'thumbnail': r're:https?://.*\.(jpg|jpeg|png)$',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'note': 'redirect from aid to bangumi link via redirect_url',
|
||||||
|
'url': 'https://www.bilibili.com/video/av114868162141203',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1933368',
|
||||||
|
'title': 'PV 引爆变革的起点',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'duration': 63.139,
|
||||||
|
'series': '时光代理人',
|
||||||
|
'series_id': '5183',
|
||||||
|
'season': '第三季',
|
||||||
|
'season_number': 4,
|
||||||
|
'season_id': '105212',
|
||||||
|
'episode': '引爆变革的起点',
|
||||||
|
'episode_number': 1,
|
||||||
|
'episode_id': '1933368',
|
||||||
|
'timestamp': 1752849001,
|
||||||
|
'upload_date': '20250718',
|
||||||
|
'thumbnail': r're:https?://.*\.(jpg|jpeg|png)$',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'note': 'video has subtitles, which requires login',
|
'note': 'video has subtitles, which requires login',
|
||||||
@@ -636,7 +656,7 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id, prefix = self._match_valid_url(url).group('id', 'prefix')
|
||||||
headers = self.geo_verification_headers()
|
headers = self.geo_verification_headers()
|
||||||
webpage, urlh = self._download_webpage_handle(url, video_id, headers=headers)
|
webpage, urlh = self._download_webpage_handle(url, video_id, headers=headers)
|
||||||
if not self._match_valid_url(urlh.url):
|
if not self._match_valid_url(urlh.url):
|
||||||
@@ -644,7 +664,24 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||||||
|
|
||||||
headers['Referer'] = url
|
headers['Referer'] = url
|
||||||
|
|
||||||
initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id)
|
initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id, default=None)
|
||||||
|
if not initial_state:
|
||||||
|
if self._search_json(r'\bwindow\._riskdata_\s*=', webpage, 'risk', video_id, default={}).get('v_voucher'):
|
||||||
|
raise ExtractorError('You have exceeded the rate limit. Try again later', expected=True)
|
||||||
|
query = {'platform': 'web'}
|
||||||
|
prefix = prefix.upper()
|
||||||
|
if prefix == 'BV':
|
||||||
|
query['bvid'] = prefix + video_id
|
||||||
|
elif prefix == 'AV':
|
||||||
|
query['aid'] = video_id
|
||||||
|
detail = self._download_json(
|
||||||
|
'https://api.bilibili.com/x/web-interface/wbi/view/detail', video_id,
|
||||||
|
note='Downloading redirection URL', errnote='Failed to download redirection URL',
|
||||||
|
query=self._sign_wbi(query, video_id), headers=headers)
|
||||||
|
new_url = traverse_obj(detail, ('data', 'View', 'redirect_url', {url_or_none}))
|
||||||
|
if new_url and BiliBiliBangumiIE.suitable(new_url):
|
||||||
|
return self.url_result(new_url, BiliBiliBangumiIE)
|
||||||
|
raise ExtractorError('Unable to extract initial state')
|
||||||
|
|
||||||
if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
|
if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
|
||||||
self.raise_login_required()
|
self.raise_login_required()
|
||||||
@@ -1329,7 +1366,7 @@ class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE):
|
|||||||
else:
|
else:
|
||||||
yield self.url_result(f'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE, entry['bvid'])
|
yield self.url_result(f'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE, entry['bvid'])
|
||||||
|
|
||||||
metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
|
_, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
|
||||||
return self.playlist_result(paged_list, playlist_id)
|
return self.playlist_result(paged_list, playlist_id)
|
||||||
|
|
||||||
|
|
||||||
@@ -1363,7 +1400,7 @@ class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE):
|
|||||||
for entry in page_data.get('data') or []:
|
for entry in page_data.get('data') or []:
|
||||||
yield self.url_result(f'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE, entry['id'])
|
yield self.url_result(f'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE, entry['id'])
|
||||||
|
|
||||||
metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
|
_, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries)
|
||||||
return self.playlist_result(paged_list, playlist_id)
|
return self.playlist_result(paged_list, playlist_id)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -174,7 +174,7 @@ class BrainPOPLegacyBaseIE(BrainPOPBaseIE):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
slug, display_id = self._match_valid_url(url).group('slug', 'id')
|
display_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
topic_data = self._search_json(
|
topic_data = self._search_json(
|
||||||
r'var\s+content\s*=\s*', webpage, 'content data',
|
r'var\s+content\s*=\s*', webpage, 'content data',
|
||||||
|
|||||||
@@ -5,8 +5,6 @@ import zlib
|
|||||||
|
|
||||||
from .anvato import AnvatoIE
|
from .anvato import AnvatoIE
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from .paramountplus import ParamountPlusIE
|
|
||||||
from ..networking import HEADRequest
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
UserNotLive,
|
UserNotLive,
|
||||||
@@ -132,13 +130,7 @@ class CBSNewsEmbedIE(CBSNewsBaseIE):
|
|||||||
video_id = item['mpxRefId']
|
video_id = item['mpxRefId']
|
||||||
video_url = self._get_video_url(item)
|
video_url = self._get_video_url(item)
|
||||||
if not video_url:
|
if not video_url:
|
||||||
# Old embeds redirect user to ParamountPlus but most links are 404
|
raise ExtractorError('This video is no longer available', expected=True)
|
||||||
pplus_url = f'https://www.paramountplus.com/shows/video/{video_id}'
|
|
||||||
try:
|
|
||||||
self._request_webpage(HEADRequest(pplus_url), video_id)
|
|
||||||
return self.url_result(pplus_url, ParamountPlusIE)
|
|
||||||
except ExtractorError:
|
|
||||||
self.raise_no_formats('This video is no longer available', True, video_id)
|
|
||||||
|
|
||||||
return self._extract_video(item, video_url, video_id)
|
return self._extract_video(item, video_url, video_id)
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ class CharlieRoseIE(InfoExtractor):
|
|||||||
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/(?:video|episode)(?:s|/player)/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/(?:video|episode)(?:s|/player)/(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://charlierose.com/videos/27996',
|
'url': 'https://charlierose.com/videos/27996',
|
||||||
'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
|
'md5': '4405b662f557f94aa256fa6a7baf7426',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '27996',
|
'id': '27996',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
@@ -39,12 +39,16 @@ class CharlieRoseIE(InfoExtractor):
|
|||||||
self._PLAYER_BASE % video_id, webpage, video_id,
|
self._PLAYER_BASE % video_id, webpage, video_id,
|
||||||
m3u8_entry_protocol='m3u8_native')[0]
|
m3u8_entry_protocol='m3u8_native')[0]
|
||||||
self._remove_duplicate_formats(info_dict['formats'])
|
self._remove_duplicate_formats(info_dict['formats'])
|
||||||
|
for fmt in info_dict['formats']:
|
||||||
|
if fmt.get('protocol') == 'm3u8_native':
|
||||||
|
fmt['__needs_testing'] = True
|
||||||
|
|
||||||
info_dict.update({
|
info_dict.update({
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'thumbnail': self._og_search_thumbnail(webpage),
|
'thumbnail': self._og_search_thumbnail(webpage),
|
||||||
'description': self._og_search_description(webpage),
|
'description': self._og_search_description(webpage),
|
||||||
|
'_format_sort_fields': ('proto',),
|
||||||
})
|
})
|
||||||
|
|
||||||
return info_dict
|
return info_dict
|
||||||
|
|||||||
@@ -1,55 +0,0 @@
|
|||||||
from .mtv import MTVIE
|
|
||||||
|
|
||||||
# TODO: Remove - Reason: Outdated Site
|
|
||||||
|
|
||||||
|
|
||||||
class CMTIE(MTVIE): # XXX: Do not subclass from concrete IE
|
|
||||||
_WORKING = False
|
|
||||||
IE_NAME = 'cmt.com'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?cmt\.com/(?:videos|shows|(?:full-)?episodes|video-clips)/(?P<id>[^/]+)'
|
|
||||||
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
|
|
||||||
'md5': 'e6b7ef3c4c45bbfae88061799bbba6c2',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '989124',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
|
|
||||||
'description': 'Blame It All On My Roots',
|
|
||||||
},
|
|
||||||
'skip': 'Video not available',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cmt.com/videos/misc/1504699/still-the-king-ep-109-in-3-minutes.jhtml#id=1739908',
|
|
||||||
'md5': 'e61a801ca4a183a466c08bd98dccbb1c',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1504699',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Still The King Ep. 109 in 3 Minutes',
|
|
||||||
'description': 'Relive or catch up with Still The King by watching this recap of season 1, episode 9.',
|
|
||||||
'timestamp': 1469421000.0,
|
|
||||||
'upload_date': '20160725',
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cmt.com/shows/party-down-south/party-down-south-ep-407-gone-girl/1738172/playlist/#id=1738172',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cmt.com/full-episodes/537qb3/nashville-the-wayfaring-stranger-season-5-ep-501',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cmt.com/video-clips/t9e4ci/nashville-juliette-in-2-minutes',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _extract_mgid(self, webpage, url):
|
|
||||||
mgid = self._search_regex(
|
|
||||||
r'MTVN\.VIDEO\.contentUri\s*=\s*([\'"])(?P<mgid>.+?)\1',
|
|
||||||
webpage, 'mgid', group='mgid', default=None)
|
|
||||||
if not mgid:
|
|
||||||
mgid = self._extract_triforce_mgid(webpage)
|
|
||||||
return mgid
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
mgid = self._extract_mgid(webpage, url)
|
|
||||||
return self.url_result(f'http://media.mtvnservices.com/embed/{mgid}')
|
|
||||||
@@ -272,6 +272,7 @@ class CNNIndonesiaIE(InfoExtractor):
|
|||||||
return merge_dicts(json_ld_data, {
|
return merge_dicts(json_ld_data, {
|
||||||
'_type': 'url_transparent',
|
'_type': 'url_transparent',
|
||||||
'url': embed_url,
|
'url': embed_url,
|
||||||
|
'id': video_id,
|
||||||
'upload_date': upload_date,
|
'upload_date': upload_date,
|
||||||
'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')),
|
'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')),
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -1,55 +1,27 @@
|
|||||||
from .mtv import MTVServicesInfoExtractor
|
from .mtv import MTVServicesBaseIE
|
||||||
|
|
||||||
|
|
||||||
class ComedyCentralIE(MTVServicesInfoExtractor):
|
class ComedyCentralIE(MTVServicesBaseIE):
|
||||||
_VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist|movies)/(?P<id>[0-9a-z]{6})'
|
_VALID_URL = r'https?://(?:www\.)?cc\.com/video-clips/(?P<id>[\da-z]{6})'
|
||||||
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
|
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.cc.com/video-clips/5ke9v2/the-daily-show-with-trevor-noah-doc-rivers-and-steve-ballmer---the-nba-player-strike',
|
'url': 'https://www.cc.com/video-clips/wl12cx',
|
||||||
'md5': 'b8acb347177c680ff18a292aa2166f80',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '89ccc86e-1b02-4f83-b0c9-1d9592ecd025',
|
'id': 'dec6953e-80c8-43b3-96cd-05e9230e704d',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'The Daily Show with Trevor Noah|August 28, 2020|25|25149|Doc Rivers and Steve Ballmer - The NBA Player Strike',
|
'display_id': 'wl12cx',
|
||||||
'description': 'md5:5334307c433892b85f4f5e5ac9ef7498',
|
'title': 'Alison Brie and Dave Franco -"Together"- Extended Interview',
|
||||||
'timestamp': 1598670000,
|
'description': 'md5:ec68e38d3282f863de9cde0ce5cd231c',
|
||||||
'upload_date': '20200829',
|
'duration': 516.76,
|
||||||
|
'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref:',
|
||||||
|
'series': 'The Daily Show',
|
||||||
|
'season': 'Season 30',
|
||||||
|
'season_number': 30,
|
||||||
|
'episode': 'Episode 0',
|
||||||
|
'episode_number': 0,
|
||||||
|
'timestamp': 1753973314,
|
||||||
|
'upload_date': '20250731',
|
||||||
|
'release_timestamp': 1753977914,
|
||||||
|
'release_date': '20250731',
|
||||||
},
|
},
|
||||||
}, {
|
'params': {'skip_download': 'm3u8'},
|
||||||
'url': 'http://www.cc.com/episodes/pnzzci/drawn-together--american-idol--parody-clip-show-season-3-ep-314',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.cc.com/video/k3sdvm/the-daily-show-with-jon-stewart-exclusive-the-fourth-estate',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.cc.com/collection-playlist/cosnej/stand-up-specials/t6vtjb',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.cc.com/movies/tkp406/a-cluesterfuenke-christmas',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
||||||
class ComedyCentralTVIE(MTVServicesInfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/folgen/(?P<id>[0-9a-z]{6})'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://www.comedycentral.tv/folgen/pxdpec/josh-investigates-klimawandel-staffel-1-ep-1',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '15907dc3-ec3c-11e8-a442-0e40cf2fc285',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Josh Investigates',
|
|
||||||
'description': 'Steht uns das Ende der Welt bevor?',
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
|
|
||||||
_GEO_COUNTRIES = ['DE']
|
|
||||||
|
|
||||||
def _get_feed_query(self, uri):
|
|
||||||
return {
|
|
||||||
'accountOverride': 'intl.mtvi.com',
|
|
||||||
'arcEp': 'web.cc.tv',
|
|
||||||
'ep': 'b9032c3a',
|
|
||||||
'imageEp': 'web.cc.tv',
|
|
||||||
'mgid': uri,
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -263,6 +263,7 @@ class InfoExtractor:
|
|||||||
* a string in the format of CLIENT[:OS]
|
* a string in the format of CLIENT[:OS]
|
||||||
* a list or a tuple of CLIENT[:OS] strings or ImpersonateTarget instances
|
* a list or a tuple of CLIENT[:OS] strings or ImpersonateTarget instances
|
||||||
* a boolean value; True means any impersonate target is sufficient
|
* a boolean value; True means any impersonate target is sufficient
|
||||||
|
* available_at Unix timestamp of when a format will be available to download
|
||||||
* downloader_options A dictionary of downloader options
|
* downloader_options A dictionary of downloader options
|
||||||
(For internal use only)
|
(For internal use only)
|
||||||
* http_chunk_size Chunk size for HTTP downloads
|
* http_chunk_size Chunk size for HTTP downloads
|
||||||
@@ -1527,11 +1528,11 @@ class InfoExtractor:
|
|||||||
r'>\s*(?:18\s+U(?:\.S\.C\.|SC)\s+)?(?:§+\s*)?2257\b',
|
r'>\s*(?:18\s+U(?:\.S\.C\.|SC)\s+)?(?:§+\s*)?2257\b',
|
||||||
]
|
]
|
||||||
|
|
||||||
age_limit = 0
|
age_limit = None
|
||||||
for marker in AGE_LIMIT_MARKERS:
|
for marker in AGE_LIMIT_MARKERS:
|
||||||
mobj = re.search(marker, html)
|
mobj = re.search(marker, html)
|
||||||
if mobj:
|
if mobj:
|
||||||
age_limit = max(age_limit, int(traverse_obj(mobj, 1, default=18)))
|
age_limit = max(age_limit or 0, int(traverse_obj(mobj, 1, default=18)))
|
||||||
return age_limit
|
return age_limit
|
||||||
|
|
||||||
def _media_rating_search(self, html):
|
def _media_rating_search(self, html):
|
||||||
@@ -2968,7 +2969,7 @@ class InfoExtractor:
|
|||||||
else:
|
else:
|
||||||
codecs = parse_codecs(codec_str)
|
codecs = parse_codecs(codec_str)
|
||||||
if content_type not in ('video', 'audio', 'text'):
|
if content_type not in ('video', 'audio', 'text'):
|
||||||
if mime_type == 'image/jpeg':
|
if mime_type in ('image/avif', 'image/jpeg'):
|
||||||
content_type = mime_type
|
content_type = mime_type
|
||||||
elif codecs.get('vcodec', 'none') != 'none':
|
elif codecs.get('vcodec', 'none') != 'none':
|
||||||
content_type = 'video'
|
content_type = 'video'
|
||||||
@@ -3028,14 +3029,14 @@ class InfoExtractor:
|
|||||||
'manifest_url': mpd_url,
|
'manifest_url': mpd_url,
|
||||||
'filesize': filesize,
|
'filesize': filesize,
|
||||||
}
|
}
|
||||||
elif content_type == 'image/jpeg':
|
elif content_type in ('image/avif', 'image/jpeg'):
|
||||||
# See test case in VikiIE
|
# See test case in VikiIE
|
||||||
# https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
|
# https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
|
||||||
f = {
|
f = {
|
||||||
'format_id': format_id,
|
'format_id': format_id,
|
||||||
'ext': 'mhtml',
|
'ext': 'mhtml',
|
||||||
'manifest_url': mpd_url,
|
'manifest_url': mpd_url,
|
||||||
'format_note': 'DASH storyboards (jpeg)',
|
'format_note': f'DASH storyboards ({mimetype2ext(mime_type)})',
|
||||||
'acodec': 'none',
|
'acodec': 'none',
|
||||||
'vcodec': 'none',
|
'vcodec': 'none',
|
||||||
}
|
}
|
||||||
@@ -3107,7 +3108,6 @@ class InfoExtractor:
|
|||||||
else:
|
else:
|
||||||
# $Number*$ or $Time$ in media template with S list available
|
# $Number*$ or $Time$ in media template with S list available
|
||||||
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
|
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
|
||||||
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
|
|
||||||
representation_ms_info['fragments'] = []
|
representation_ms_info['fragments'] = []
|
||||||
segment_time = 0
|
segment_time = 0
|
||||||
segment_d = None
|
segment_d = None
|
||||||
@@ -3177,7 +3177,7 @@ class InfoExtractor:
|
|||||||
'url': mpd_url or base_url,
|
'url': mpd_url or base_url,
|
||||||
'fragment_base_url': base_url,
|
'fragment_base_url': base_url,
|
||||||
'fragments': [],
|
'fragments': [],
|
||||||
'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
|
'protocol': 'mhtml' if mime_type in ('image/avif', 'image/jpeg') else 'http_dash_segments',
|
||||||
})
|
})
|
||||||
if 'initialization_url' in representation_ms_info:
|
if 'initialization_url' in representation_ms_info:
|
||||||
initialization_url = representation_ms_info['initialization_url']
|
initialization_url = representation_ms_info['initialization_url']
|
||||||
@@ -3192,7 +3192,7 @@ class InfoExtractor:
|
|||||||
else:
|
else:
|
||||||
# Assuming direct URL to unfragmented media.
|
# Assuming direct URL to unfragmented media.
|
||||||
f['url'] = base_url
|
f['url'] = base_url
|
||||||
if content_type in ('video', 'audio', 'image/jpeg'):
|
if content_type in ('video', 'audio', 'image/avif', 'image/jpeg'):
|
||||||
f['manifest_stream_number'] = stream_numbers[f['url']]
|
f['manifest_stream_number'] = stream_numbers[f['url']]
|
||||||
stream_numbers[f['url']] += 1
|
stream_numbers[f['url']] += 1
|
||||||
period_entry['formats'].append(f)
|
period_entry['formats'].append(f)
|
||||||
|
|||||||
@@ -1,243 +0,0 @@
|
|||||||
import hashlib
|
|
||||||
import hmac
|
|
||||||
import re
|
|
||||||
import time
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..networking.exceptions import HTTPError
|
|
||||||
from ..utils import (
|
|
||||||
ExtractorError,
|
|
||||||
determine_ext,
|
|
||||||
float_or_none,
|
|
||||||
int_or_none,
|
|
||||||
orderedSet,
|
|
||||||
parse_age_limit,
|
|
||||||
parse_duration,
|
|
||||||
url_or_none,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class CrackleIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?(?:sony)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
|
|
||||||
_TESTS = [{
|
|
||||||
# Crackle is available in the United States and territories
|
|
||||||
'url': 'https://www.crackle.com/thanksgiving/2510064',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '2510064',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Touch Football',
|
|
||||||
'description': 'md5:cfbb513cf5de41e8b56d7ab756cff4df',
|
|
||||||
'duration': 1398,
|
|
||||||
'view_count': int,
|
|
||||||
'average_rating': 0,
|
|
||||||
'age_limit': 17,
|
|
||||||
'genre': 'Comedy',
|
|
||||||
'creator': 'Daniel Powell',
|
|
||||||
'artist': 'Chris Elliott, Amy Sedaris',
|
|
||||||
'release_year': 2016,
|
|
||||||
'series': 'Thanksgiving',
|
|
||||||
'episode': 'Touch Football',
|
|
||||||
'season_number': 1,
|
|
||||||
'episode_number': 1,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'expected_warnings': [
|
|
||||||
'Trying with a list of known countries',
|
|
||||||
],
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.sonycrackle.com/thanksgiving/2510064',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
_MEDIA_FILE_SLOTS = {
|
|
||||||
'360p.mp4': {
|
|
||||||
'width': 640,
|
|
||||||
'height': 360,
|
|
||||||
},
|
|
||||||
'480p.mp4': {
|
|
||||||
'width': 768,
|
|
||||||
'height': 432,
|
|
||||||
},
|
|
||||||
'480p_1mbps.mp4': {
|
|
||||||
'width': 852,
|
|
||||||
'height': 480,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
def _download_json(self, url, *args, **kwargs):
|
|
||||||
# Authorization generation algorithm is reverse engineered from:
|
|
||||||
# https://www.sonycrackle.com/static/js/main.ea93451f.chunk.js
|
|
||||||
timestamp = time.strftime('%Y%m%d%H%M', time.gmtime())
|
|
||||||
h = hmac.new(b'IGSLUQCBDFHEOIFM', '|'.join([url, timestamp]).encode(), hashlib.sha1).hexdigest().upper()
|
|
||||||
headers = {
|
|
||||||
'Accept': 'application/json',
|
|
||||||
'Authorization': '|'.join([h, timestamp, '117', '1']),
|
|
||||||
}
|
|
||||||
return InfoExtractor._download_json(self, url, *args, headers=headers, **kwargs)
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
|
|
||||||
geo_bypass_country = self.get_param('geo_bypass_country', None)
|
|
||||||
countries = orderedSet((geo_bypass_country, 'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI', ''))
|
|
||||||
num_countries, num = len(countries) - 1, 0
|
|
||||||
|
|
||||||
media = {}
|
|
||||||
for num, country in enumerate(countries):
|
|
||||||
if num == 1: # start hard-coded list
|
|
||||||
self.report_warning('%s. Trying with a list of known countries' % (
|
|
||||||
f'Unable to obtain video formats from {geo_bypass_country} API' if geo_bypass_country
|
|
||||||
else 'No country code was given using --geo-bypass-country'))
|
|
||||||
elif num == num_countries: # end of list
|
|
||||||
geo_info = self._download_json(
|
|
||||||
'https://web-api-us.crackle.com/Service.svc/geo/country',
|
|
||||||
video_id, fatal=False, note='Downloading geo-location information from crackle API',
|
|
||||||
errnote='Unable to fetch geo-location information from crackle') or {}
|
|
||||||
country = geo_info.get('CountryCode')
|
|
||||||
if country is None:
|
|
||||||
continue
|
|
||||||
self.to_screen(f'{self.IE_NAME} identified country as {country}')
|
|
||||||
if country in countries:
|
|
||||||
self.to_screen(f'Downloading from {country} API was already attempted. Skipping...')
|
|
||||||
continue
|
|
||||||
|
|
||||||
if country is None:
|
|
||||||
continue
|
|
||||||
try:
|
|
||||||
media = self._download_json(
|
|
||||||
f'https://web-api-us.crackle.com/Service.svc/details/media/{video_id}/{country}?disableProtocols=true',
|
|
||||||
video_id, note=f'Downloading media JSON from {country} API',
|
|
||||||
errnote='Unable to download media JSON')
|
|
||||||
except ExtractorError as e:
|
|
||||||
# 401 means geo restriction, trying next country
|
|
||||||
if isinstance(e.cause, HTTPError) and e.cause.status == 401:
|
|
||||||
continue
|
|
||||||
raise
|
|
||||||
|
|
||||||
status = media.get('status')
|
|
||||||
if status.get('messageCode') != '0':
|
|
||||||
raise ExtractorError(
|
|
||||||
'{} said: {} {} - {}'.format(
|
|
||||||
self.IE_NAME, status.get('messageCodeDescription'), status.get('messageCode'), status.get('message')),
|
|
||||||
expected=True)
|
|
||||||
|
|
||||||
# Found video formats
|
|
||||||
if isinstance(media.get('MediaURLs'), list):
|
|
||||||
break
|
|
||||||
|
|
||||||
ignore_no_formats = self.get_param('ignore_no_formats_error')
|
|
||||||
|
|
||||||
if not media or (not media.get('MediaURLs') and not ignore_no_formats):
|
|
||||||
raise ExtractorError(
|
|
||||||
'Unable to access the crackle API. Try passing your country code '
|
|
||||||
'to --geo-bypass-country. If it still does not work and the '
|
|
||||||
'video is available in your country')
|
|
||||||
title = media['Title']
|
|
||||||
|
|
||||||
formats, subtitles = [], {}
|
|
||||||
has_drm = False
|
|
||||||
for e in media.get('MediaURLs') or []:
|
|
||||||
if e.get('UseDRM'):
|
|
||||||
has_drm = True
|
|
||||||
format_url = url_or_none(e.get('DRMPath'))
|
|
||||||
else:
|
|
||||||
format_url = url_or_none(e.get('Path'))
|
|
||||||
if not format_url:
|
|
||||||
continue
|
|
||||||
ext = determine_ext(format_url)
|
|
||||||
if ext == 'm3u8':
|
|
||||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
|
||||||
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
|
||||||
m3u8_id='hls', fatal=False)
|
|
||||||
formats.extend(fmts)
|
|
||||||
subtitles = self._merge_subtitles(subtitles, subs)
|
|
||||||
elif ext == 'mpd':
|
|
||||||
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
|
||||||
format_url, video_id, mpd_id='dash', fatal=False)
|
|
||||||
formats.extend(fmts)
|
|
||||||
subtitles = self._merge_subtitles(subtitles, subs)
|
|
||||||
elif format_url.endswith('.ism/Manifest'):
|
|
||||||
fmts, subs = self._extract_ism_formats_and_subtitles(
|
|
||||||
format_url, video_id, ism_id='mss', fatal=False)
|
|
||||||
formats.extend(fmts)
|
|
||||||
subtitles = self._merge_subtitles(subtitles, subs)
|
|
||||||
else:
|
|
||||||
mfs_path = e.get('Type')
|
|
||||||
mfs_info = self._MEDIA_FILE_SLOTS.get(mfs_path)
|
|
||||||
if not mfs_info:
|
|
||||||
continue
|
|
||||||
formats.append({
|
|
||||||
'url': format_url,
|
|
||||||
'format_id': 'http-' + mfs_path.split('.')[0],
|
|
||||||
'width': mfs_info['width'],
|
|
||||||
'height': mfs_info['height'],
|
|
||||||
})
|
|
||||||
if not formats and has_drm:
|
|
||||||
self.report_drm(video_id)
|
|
||||||
|
|
||||||
description = media.get('Description')
|
|
||||||
duration = int_or_none(media.get(
|
|
||||||
'DurationInSeconds')) or parse_duration(media.get('Duration'))
|
|
||||||
view_count = int_or_none(media.get('CountViews'))
|
|
||||||
average_rating = float_or_none(media.get('UserRating'))
|
|
||||||
age_limit = parse_age_limit(media.get('Rating'))
|
|
||||||
genre = media.get('Genre')
|
|
||||||
release_year = int_or_none(media.get('ReleaseYear'))
|
|
||||||
creator = media.get('Directors')
|
|
||||||
artist = media.get('Cast')
|
|
||||||
|
|
||||||
if media.get('MediaTypeDisplayValue') == 'Full Episode':
|
|
||||||
series = media.get('ShowName')
|
|
||||||
episode = title
|
|
||||||
season_number = int_or_none(media.get('Season'))
|
|
||||||
episode_number = int_or_none(media.get('Episode'))
|
|
||||||
else:
|
|
||||||
series = episode = season_number = episode_number = None
|
|
||||||
|
|
||||||
cc_files = media.get('ClosedCaptionFiles')
|
|
||||||
if isinstance(cc_files, list):
|
|
||||||
for cc_file in cc_files:
|
|
||||||
if not isinstance(cc_file, dict):
|
|
||||||
continue
|
|
||||||
cc_url = url_or_none(cc_file.get('Path'))
|
|
||||||
if not cc_url:
|
|
||||||
continue
|
|
||||||
lang = cc_file.get('Locale') or 'en'
|
|
||||||
subtitles.setdefault(lang, []).append({'url': cc_url})
|
|
||||||
|
|
||||||
thumbnails = []
|
|
||||||
images = media.get('Images')
|
|
||||||
if isinstance(images, list):
|
|
||||||
for image_key, image_url in images.items():
|
|
||||||
mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
|
|
||||||
if not mobj:
|
|
||||||
continue
|
|
||||||
thumbnails.append({
|
|
||||||
'url': image_url,
|
|
||||||
'width': int(mobj.group(1)),
|
|
||||||
'height': int(mobj.group(2)),
|
|
||||||
})
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'description': description,
|
|
||||||
'duration': duration,
|
|
||||||
'view_count': view_count,
|
|
||||||
'average_rating': average_rating,
|
|
||||||
'age_limit': age_limit,
|
|
||||||
'genre': genre,
|
|
||||||
'creator': creator,
|
|
||||||
'artist': artist,
|
|
||||||
'release_year': release_year,
|
|
||||||
'series': series,
|
|
||||||
'episode': episode,
|
|
||||||
'season_number': season_number,
|
|
||||||
'episode_number': episode_number,
|
|
||||||
'thumbnails': thumbnails,
|
|
||||||
'subtitles': subtitles,
|
|
||||||
'formats': formats,
|
|
||||||
}
|
|
||||||
@@ -1,180 +0,0 @@
|
|||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
ExtractorError,
|
|
||||||
int_or_none,
|
|
||||||
parse_age_limit,
|
|
||||||
parse_iso8601,
|
|
||||||
parse_qs,
|
|
||||||
smuggle_url,
|
|
||||||
str_or_none,
|
|
||||||
update_url_query,
|
|
||||||
)
|
|
||||||
from ..utils.traversal import traverse_obj
|
|
||||||
|
|
||||||
|
|
||||||
class CWTVIE(InfoExtractor):
|
|
||||||
IE_NAME = 'cwtv'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?cw(?:tv(?:pr)?|seed)\.com/(?:shows/)?(?:[^/]+/)+[^?]*\?.*\b(?:play|watch|guid)=(?P<id>[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://www.cwtv.com/shows/continuum/a-stitch-in-time/?play=9149a1e1-4cb2-46d7-81b2-47d35bbd332b',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '9149a1e1-4cb2-46d7-81b2-47d35bbd332b',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'A Stitch in Time',
|
|
||||||
'description': r're:(?s)City Protective Services officer Kiera Cameron is transported from 2077.+',
|
|
||||||
'thumbnail': r're:https?://.+\.jpe?g',
|
|
||||||
'duration': 2632,
|
|
||||||
'timestamp': 1736928000,
|
|
||||||
'uploader': 'CWTV',
|
|
||||||
'chapters': 'count:5',
|
|
||||||
'series': 'Continuum',
|
|
||||||
'season_number': 1,
|
|
||||||
'episode_number': 1,
|
|
||||||
'age_limit': 14,
|
|
||||||
'upload_date': '20250115',
|
|
||||||
'season': 'Season 1',
|
|
||||||
'episode': 'Episode 1',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://cwtv.com/shows/arrow/legends-of-yesterday/?play=6b15e985-9345-4f60-baf8-56e96be57c63',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6b15e985-9345-4f60-baf8-56e96be57c63',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Legends of Yesterday',
|
|
||||||
'description': r're:(?s)Oliver and Barry Allen take Kendra Saunders and Carter Hall to a remote.+',
|
|
||||||
'duration': 2665,
|
|
||||||
'series': 'Arrow',
|
|
||||||
'season_number': 4,
|
|
||||||
'season': '4',
|
|
||||||
'episode_number': 8,
|
|
||||||
'upload_date': '20151203',
|
|
||||||
'timestamp': 1449122100,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'redirect to http://cwtv.com/shows/arrow/',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cwseed.com/shows/whose-line-is-it-anyway/jeff-davis-4/?play=24282b12-ead2-42f2-95ad-26770c2c6088',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '24282b12-ead2-42f2-95ad-26770c2c6088',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Jeff Davis 4',
|
|
||||||
'description': 'Jeff Davis is back to make you laugh.',
|
|
||||||
'duration': 1263,
|
|
||||||
'series': 'Whose Line Is It Anyway?',
|
|
||||||
'season_number': 11,
|
|
||||||
'episode_number': 20,
|
|
||||||
'upload_date': '20151006',
|
|
||||||
'timestamp': 1444107300,
|
|
||||||
'age_limit': 14,
|
|
||||||
'uploader': 'CWTV',
|
|
||||||
'thumbnail': r're:https?://.+\.jpe?g',
|
|
||||||
'chapters': 'count:4',
|
|
||||||
'episode': 'Episode 20',
|
|
||||||
'season': 'Season 11',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://cwtv.com/thecw/chroniclesofcisco/?play=8adebe35-f447-465f-ab52-e863506ff6d6',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://cwtvpr.com/the-cw/video?watch=9eee3f60-ef4e-440b-b3b2-49428ac9c54e',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://cwtv.com/shows/arrow/legends-of-yesterday/?watch=6b15e985-9345-4f60-baf8-56e96be57c63',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cwtv.com/movies/play/?guid=0a8e8b5b-1356-41d5-9a6a-4eda1a6feb6c',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
data = self._download_json(
|
|
||||||
f'https://images.cwtv.com/feed/app-2/video-meta/apiversion_22/device_android/guid_{video_id}', video_id)
|
|
||||||
if traverse_obj(data, 'result') != 'ok':
|
|
||||||
raise ExtractorError(traverse_obj(data, (('error_msg', 'msg'), {str}, any)), expected=True)
|
|
||||||
video_data = data['video']
|
|
||||||
title = video_data['title']
|
|
||||||
mpx_url = update_url_query(
|
|
||||||
video_data.get('mpx_url') or f'https://link.theplatform.com/s/cwtv/media/guid/2703454149/{video_id}',
|
|
||||||
{'formats': 'M3U+none'})
|
|
||||||
|
|
||||||
season = str_or_none(video_data.get('season'))
|
|
||||||
episode = str_or_none(video_data.get('episode'))
|
|
||||||
if episode and season:
|
|
||||||
episode = episode[len(season):]
|
|
||||||
|
|
||||||
return {
|
|
||||||
'_type': 'url_transparent',
|
|
||||||
'id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'url': smuggle_url(mpx_url, {'force_smil_url': True}),
|
|
||||||
'description': video_data.get('description_long'),
|
|
||||||
'duration': int_or_none(video_data.get('duration_secs')),
|
|
||||||
'series': video_data.get('series_name'),
|
|
||||||
'season_number': int_or_none(season),
|
|
||||||
'episode_number': int_or_none(episode),
|
|
||||||
'timestamp': parse_iso8601(video_data.get('start_time')),
|
|
||||||
'age_limit': parse_age_limit(video_data.get('rating')),
|
|
||||||
'ie_key': 'ThePlatform',
|
|
||||||
'thumbnail': video_data.get('large_thumbnail'),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class CWTVMovieIE(InfoExtractor):
|
|
||||||
IE_NAME = 'cwtv:movie'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?cwtv\.com/shows/(?P<id>[\w-]+)/?\?(?:[^#]+&)?viewContext=Movies'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://www.cwtv.com/shows/the-crush/?viewContext=Movies+Swimlane',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '0a8e8b5b-1356-41d5-9a6a-4eda1a6feb6c',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'The Crush',
|
|
||||||
'upload_date': '20241112',
|
|
||||||
'description': 'md5:1549acd90dff4a8273acd7284458363e',
|
|
||||||
'chapters': 'count:9',
|
|
||||||
'timestamp': 1731398400,
|
|
||||||
'age_limit': 16,
|
|
||||||
'duration': 5337,
|
|
||||||
'series': 'The Crush',
|
|
||||||
'season': 'Season 1',
|
|
||||||
'uploader': 'CWTV',
|
|
||||||
'season_number': 1,
|
|
||||||
'episode': 'Episode 1',
|
|
||||||
'episode_number': 1,
|
|
||||||
'thumbnail': r're:https?://.+\.jpe?g',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
_UUID_RE = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
display_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
app_url = (
|
|
||||||
self._html_search_meta('al:ios:url', webpage, default=None)
|
|
||||||
or self._html_search_meta('al:android:url', webpage, default=None))
|
|
||||||
video_id = (
|
|
||||||
traverse_obj(parse_qs(app_url), ('video_id', 0, {lambda x: re.fullmatch(self._UUID_RE, x)}, 0))
|
|
||||||
or self._search_regex([
|
|
||||||
rf'CWTV\.Site\.curPlayingGUID\s*=\s*["\']({self._UUID_RE})',
|
|
||||||
rf'CWTV\.Site\.viewInAppURL\s*=\s*["\']/shows/[\w-]+/watch-in-app/\?play=({self._UUID_RE})',
|
|
||||||
], webpage, 'video ID'))
|
|
||||||
|
|
||||||
return self.url_result(
|
|
||||||
f'https://www.cwtv.com/shows/{display_id}/{display_id}/?play={video_id}', CWTVIE, video_id)
|
|
||||||
@@ -171,17 +171,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
|||||||
'view_count': int,
|
'view_count': int,
|
||||||
},
|
},
|
||||||
'skip': 'video gone',
|
'skip': 'video gone',
|
||||||
}, {
|
|
||||||
# Vevo video
|
|
||||||
'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
|
|
||||||
'info_dict': {
|
|
||||||
'title': 'Roar (Official)',
|
|
||||||
'id': 'USUV71301934',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'uploader': 'Katy Perry',
|
|
||||||
'upload_date': '20130905',
|
|
||||||
},
|
|
||||||
'skip': 'Invalid URL',
|
|
||||||
}, {
|
}, {
|
||||||
# age-restricted video
|
# age-restricted video
|
||||||
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
|
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
|
||||||
|
|||||||
@@ -2,18 +2,152 @@ import re
|
|||||||
import urllib.parse
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import js_to_json, url_or_none
|
from ..utils import int_or_none, js_to_json, url_or_none
|
||||||
from ..utils.traversal import traverse_obj
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class FaulioLiveIE(InfoExtractor):
|
class FaulioBaseIE(InfoExtractor):
|
||||||
_DOMAINS = (
|
_DOMAINS = (
|
||||||
'aloula.sba.sa',
|
'aloula.sba.sa',
|
||||||
'bahry.com',
|
'bahry.com',
|
||||||
'maraya.sba.net.ae',
|
'maraya.sba.net.ae',
|
||||||
'sat7plus.org',
|
'sat7plus.org',
|
||||||
)
|
)
|
||||||
_VALID_URL = fr'https?://(?:{"|".join(map(re.escape, _DOMAINS))})/(?:(?:en|ar|fa)/)?live/(?P<id>[a-zA-Z0-9-]+)'
|
_LANGUAGES = ('ar', 'en', 'fa')
|
||||||
|
_BASE_URL_RE = fr'https?://(?:{"|".join(map(re.escape, _DOMAINS))})/(?:(?:{"|".join(_LANGUAGES)})/)?'
|
||||||
|
|
||||||
|
def _get_headers(self, url):
|
||||||
|
parsed_url = urllib.parse.urlparse(url)
|
||||||
|
return {
|
||||||
|
'Referer': url,
|
||||||
|
'Origin': f'{parsed_url.scheme}://{parsed_url.hostname}',
|
||||||
|
}
|
||||||
|
|
||||||
|
def _get_api_base(self, url, video_id):
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
config_data = self._search_json(
|
||||||
|
r'window\.__NUXT__\.config=', webpage, 'config', video_id, transform_source=js_to_json)
|
||||||
|
return config_data['public']['TRANSLATIONS_API_URL']
|
||||||
|
|
||||||
|
|
||||||
|
class FaulioIE(FaulioBaseIE):
|
||||||
|
_VALID_URL = fr'{FaulioBaseIE._BASE_URL_RE}(?:episode|media)/(?P<id>[a-zA-Z0-9-]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://aloula.sba.sa/en/episode/29102',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'aloula.faulio.com_29102',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'display_id': 'هذا-مكانك-03-004-v-29102',
|
||||||
|
'title': 'الحلقة 4',
|
||||||
|
'episode': 'الحلقة 4',
|
||||||
|
'description': '',
|
||||||
|
'series': 'هذا مكانك',
|
||||||
|
'season': 'Season 3',
|
||||||
|
'season_number': 3,
|
||||||
|
'episode_number': 4,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 4855,
|
||||||
|
'age_limit': 3,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://bahry.com/en/media/1191',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'bahry.faulio.com_1191',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'display_id': 'Episode-4-1191',
|
||||||
|
'title': 'Episode 4',
|
||||||
|
'episode': 'Episode 4',
|
||||||
|
'description': '',
|
||||||
|
'series': 'Wild Water',
|
||||||
|
'season': 'Season 1',
|
||||||
|
'season_number': 1,
|
||||||
|
'episode_number': 4,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 1653,
|
||||||
|
'age_limit': 0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://maraya.sba.net.ae/episode/127735',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'maraya.faulio.com_127735',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'display_id': 'عبدالله-الهاجري---عبدالرحمن-المطروشي-127735',
|
||||||
|
'title': 'عبدالله الهاجري - عبدالرحمن المطروشي',
|
||||||
|
'episode': 'عبدالله الهاجري - عبدالرحمن المطروشي',
|
||||||
|
'description': 'md5:53de01face66d3d6303221e5a49388a0',
|
||||||
|
'series': 'أبناؤنا في الخارج',
|
||||||
|
'season': 'Season 3',
|
||||||
|
'season_number': 3,
|
||||||
|
'episode_number': 7,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 1316,
|
||||||
|
'age_limit': 0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://sat7plus.org/episode/18165',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'sat7.faulio.com_18165',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'display_id': 'ep-13-ADHD-18165',
|
||||||
|
'title': 'ADHD and creativity',
|
||||||
|
'episode': 'ADHD and creativity',
|
||||||
|
'description': '',
|
||||||
|
'series': 'ADHD Podcast',
|
||||||
|
'season': 'Season 1',
|
||||||
|
'season_number': 1,
|
||||||
|
'episode_number': 13,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
'duration': 2492,
|
||||||
|
'age_limit': 0,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://aloula.sba.sa/en/episode/0',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
api_base = self._get_api_base(url, video_id)
|
||||||
|
video_info = self._download_json(f'{api_base}/video/{video_id}', video_id, fatal=False)
|
||||||
|
player_info = self._download_json(f'{api_base}/video/{video_id}/player', video_id)
|
||||||
|
|
||||||
|
headers = self._get_headers(url)
|
||||||
|
formats = []
|
||||||
|
subtitles = {}
|
||||||
|
if hls_url := traverse_obj(player_info, ('settings', 'protocols', 'hls', {url_or_none})):
|
||||||
|
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||||
|
hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False, headers=headers)
|
||||||
|
formats.extend(fmts)
|
||||||
|
self._merge_subtitles(subs, target=subtitles)
|
||||||
|
|
||||||
|
if mpd_url := traverse_obj(player_info, ('settings', 'protocols', 'dash', {url_or_none})):
|
||||||
|
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
||||||
|
mpd_url, video_id, mpd_id='dash', fatal=False, headers=headers)
|
||||||
|
formats.extend(fmts)
|
||||||
|
self._merge_subtitles(subs, target=subtitles)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': f'{urllib.parse.urlparse(api_base).hostname}_{video_id}',
|
||||||
|
**traverse_obj(traverse_obj(video_info, ('blocks', 0)), {
|
||||||
|
'display_id': ('slug', {str}),
|
||||||
|
'title': ('title', {str}),
|
||||||
|
'episode': ('title', {str}),
|
||||||
|
'description': ('description', {str}),
|
||||||
|
'series': ('program_title', {str}),
|
||||||
|
'season_number': ('season_number', {int_or_none}),
|
||||||
|
'episode_number': ('episode', {int_or_none}),
|
||||||
|
'thumbnail': ('image', {url_or_none}),
|
||||||
|
'duration': ('duration', 'total', {int_or_none}),
|
||||||
|
'age_limit': ('age_rating', {int_or_none}),
|
||||||
|
}),
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
'http_headers': headers,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class FaulioLiveIE(FaulioBaseIE):
|
||||||
|
_VALID_URL = fr'{FaulioBaseIE._BASE_URL_RE}live/(?P<id>[a-zA-Z0-9-]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://aloula.sba.sa/live/saudiatv',
|
'url': 'https://aloula.sba.sa/live/saudiatv',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -69,27 +203,24 @@ class FaulioLiveIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
api_base = self._get_api_base(url, video_id)
|
||||||
|
|
||||||
config_data = self._search_json(
|
|
||||||
r'window\.__NUXT__\.config=', webpage, 'config', video_id, transform_source=js_to_json)
|
|
||||||
api_base = config_data['public']['TRANSLATIONS_API_URL']
|
|
||||||
|
|
||||||
channel = traverse_obj(
|
channel = traverse_obj(
|
||||||
self._download_json(f'{api_base}/channels', video_id),
|
self._download_json(f'{api_base}/channels', video_id),
|
||||||
(lambda k, v: v['url'] == video_id, any))
|
(lambda k, v: v['url'] == video_id, any))
|
||||||
|
|
||||||
|
headers = self._get_headers(url)
|
||||||
formats = []
|
formats = []
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
if hls_url := traverse_obj(channel, ('streams', 'hls', {url_or_none})):
|
if hls_url := traverse_obj(channel, ('streams', 'hls', {url_or_none})):
|
||||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||||
hls_url, video_id, 'mp4', m3u8_id='hls', live=True, fatal=False)
|
hls_url, video_id, 'mp4', m3u8_id='hls', live=True, fatal=False, headers=headers)
|
||||||
formats.extend(fmts)
|
formats.extend(fmts)
|
||||||
self._merge_subtitles(subs, target=subtitles)
|
self._merge_subtitles(subs, target=subtitles)
|
||||||
|
|
||||||
if mpd_url := traverse_obj(channel, ('streams', 'mpd', {url_or_none})):
|
if mpd_url := traverse_obj(channel, ('streams', 'mpd', {url_or_none})):
|
||||||
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
||||||
mpd_url, video_id, mpd_id='dash', fatal=False)
|
mpd_url, video_id, mpd_id='dash', fatal=False, headers=headers)
|
||||||
formats.extend(fmts)
|
formats.extend(fmts)
|
||||||
self._merge_subtitles(subs, target=subtitles)
|
self._merge_subtitles(subs, target=subtitles)
|
||||||
|
|
||||||
@@ -101,5 +232,6 @@ class FaulioLiveIE(InfoExtractor):
|
|||||||
}),
|
}),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
|
'http_headers': headers,
|
||||||
'is_live': True,
|
'is_live': True,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class FifaIE(InfoExtractor):
|
class FifaIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://www\.fifa\.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)'
|
_VALID_URL = r'https?://www\.fifa\.com/fifaplus/\w{2}/watch/([^#?]+/)?(?P<id>\w+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y',
|
'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -51,7 +51,7 @@ class FifaIE(InfoExtractor):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id, locale = self._match_valid_url(url).group('id', 'locale')
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
preconnect_link = self._search_regex(
|
preconnect_link = self._search_regex(
|
||||||
|
|||||||
@@ -3,15 +3,19 @@ from ..networking.exceptions import HTTPError
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
UserNotLive,
|
UserNotLive,
|
||||||
|
int_or_none,
|
||||||
|
join_nonempty,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
str_or_none,
|
str_or_none,
|
||||||
traverse_obj,
|
|
||||||
url_or_none,
|
url_or_none,
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class FlexTVIE(InfoExtractor):
|
class FlexTVIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?flextv\.co\.kr/channels/(?P<id>\d+)/live'
|
IE_NAME = 'ttinglive'
|
||||||
|
IE_DESC = '띵라이브 (formerly FlexTV)'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?(?:ttinglive\.com|flextv\.co\.kr)/channels/(?P<id>\d+)/live'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.flextv.co.kr/channels/231638/live',
|
'url': 'https://www.flextv.co.kr/channels/231638/live',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -36,21 +40,32 @@ class FlexTVIE(InfoExtractor):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
stream_data = self._download_json(
|
stream_data = self._download_json(
|
||||||
f'https://api.flextv.co.kr/api/channels/{channel_id}/stream',
|
f'https://api.ttinglive.com/api/channels/{channel_id}/stream',
|
||||||
channel_id, query={'option': 'all'})
|
channel_id, query={'option': 'all'})
|
||||||
except ExtractorError as e:
|
except ExtractorError as e:
|
||||||
if isinstance(e.cause, HTTPError) and e.cause.status == 400:
|
if isinstance(e.cause, HTTPError) and e.cause.status == 400:
|
||||||
raise UserNotLive(video_id=channel_id)
|
raise UserNotLive(video_id=channel_id)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
playlist_url = stream_data['sources'][0]['url']
|
formats = []
|
||||||
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
|
for stream in traverse_obj(stream_data, ('sources', ..., {dict})):
|
||||||
playlist_url, channel_id, 'mp4')
|
if stream.get('format') == 'ivs' and url_or_none(stream.get('url')):
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
stream['url'], channel_id, 'mp4', live=True, fatal=False, m3u8_id='ivs'))
|
||||||
|
for format_type in ['hls', 'flv']:
|
||||||
|
for data in traverse_obj(stream, (
|
||||||
|
'urlDetail', format_type, 'resolution', lambda _, v: url_or_none(v['url']))):
|
||||||
|
formats.append({
|
||||||
|
'format_id': join_nonempty(format_type, data.get('suffixName'), delim=''),
|
||||||
|
'url': data['url'],
|
||||||
|
'height': int_or_none(data.get('resolution')),
|
||||||
|
'ext': 'mp4' if format_type == 'hls' else 'flv',
|
||||||
|
'protocol': 'm3u8_native' if format_type == 'hls' else 'http',
|
||||||
|
})
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': channel_id,
|
'id': channel_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
|
||||||
'is_live': True,
|
'is_live': True,
|
||||||
**traverse_obj(stream_data, {
|
**traverse_obj(stream_data, {
|
||||||
'title': ('stream', 'title', {str}),
|
'title': ('stream', 'title', {str}),
|
||||||
|
|||||||
@@ -363,13 +363,7 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
|||||||
display_id = self._match_id(url)
|
display_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
nextjs_data = self._search_nextjs_v13_data(webpage, display_id)
|
nextjs_data = self._search_nextjs_v13_data(webpage, display_id)
|
||||||
|
video_id = get_first(nextjs_data, ('options', 'id', {str}))
|
||||||
if get_first(nextjs_data, ('isLive', {bool})):
|
|
||||||
# For livestreams we need the id of the stream instead of the currently airing episode id
|
|
||||||
video_id = get_first(nextjs_data, ('options', 'id', {str}))
|
|
||||||
else:
|
|
||||||
video_id = get_first(nextjs_data, ('video', ('playerReplayId', 'siId'), {str}))
|
|
||||||
|
|
||||||
if not video_id:
|
if not video_id:
|
||||||
raise ExtractorError('Unable to extract video ID')
|
raise ExtractorError('Unable to extract video ID')
|
||||||
|
|
||||||
|
|||||||
@@ -121,7 +121,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'cauky-lidi-70-dil-babis-predstavil-pohadky-prymulanek-nebo-andrejovy-nove-saty-ac867',
|
'id': 'cauky-lidi-70-dil-babis-predstavil-pohadky-prymulanek-nebo-andrejovy-nove-saty-ac867',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'čauky lidi 70 finall',
|
'title': 'čauky lidi 70 finall',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:47b2673a5b76780d9d329783e1fbf5aa',
|
'description': 'md5:47b2673a5b76780d9d329783e1fbf5aa',
|
||||||
'direct': True,
|
'direct': True,
|
||||||
'duration': 318.0,
|
'duration': 318.0,
|
||||||
@@ -244,7 +243,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'paris-d-moll',
|
'id': 'paris-d-moll',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Paris d-moll',
|
'title': 'Paris d-moll',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:319e37ea5542293db37e1e13072fe330',
|
'description': 'md5:319e37ea5542293db37e1e13072fe330',
|
||||||
'thumbnail': r're:https?://www\.filmarkivet\.se/wp-content/uploads/.+\.jpg',
|
'thumbnail': r're:https?://www\.filmarkivet\.se/wp-content/uploads/.+\.jpg',
|
||||||
},
|
},
|
||||||
@@ -255,7 +253,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '60413035',
|
'id': '60413035',
|
||||||
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
|
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:bbb4e12e42e78609a74fd421b93b1239',
|
'description': 'md5:bbb4e12e42e78609a74fd421b93b1239',
|
||||||
'thumbnail': r're:https?://www\.dagbladet\.no/images/.+',
|
'thumbnail': r're:https?://www\.dagbladet\.no/images/.+',
|
||||||
},
|
},
|
||||||
@@ -267,7 +264,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'single_clip',
|
'id': 'single_clip',
|
||||||
'title': 'Single Clip player examples',
|
'title': 'Single Clip player examples',
|
||||||
'age_limit': 0,
|
|
||||||
},
|
},
|
||||||
'playlist_count': 3,
|
'playlist_count': 3,
|
||||||
}, {
|
}, {
|
||||||
@@ -324,7 +320,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'videos-1',
|
'id': 'videos-1',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Videos & Audio - King Machine (1)',
|
'title': 'Videos & Audio - King Machine (1)',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
|
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
|
||||||
'thumbnail': r're:https?://media\.indiedb\.com/cache/images/.+\.jpg',
|
'thumbnail': r're:https?://media\.indiedb\.com/cache/images/.+\.jpg',
|
||||||
'_old_archive_ids': ['generic videos'],
|
'_old_archive_ids': ['generic videos'],
|
||||||
@@ -363,7 +358,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': '21217',
|
'id': '21217',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': '40 ночей (2016) - BogMedia.org',
|
'title': '40 ночей (2016) - BogMedia.org',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:4e6d7d622636eb7948275432eb256dc3',
|
'description': 'md5:4e6d7d622636eb7948275432eb256dc3',
|
||||||
'display_id': '40-nochey-2016',
|
'display_id': '40-nochey-2016',
|
||||||
'thumbnail': r're:https?://bogmedia\.org/contents/videos_screenshots/.+\.jpg',
|
'thumbnail': r're:https?://bogmedia\.org/contents/videos_screenshots/.+\.jpg',
|
||||||
@@ -378,7 +372,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': '18485',
|
'id': '18485',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Клип: Ленинград - ЗОЖ скачать, смотреть онлайн | Youix.com',
|
'title': 'Клип: Ленинград - ЗОЖ скачать, смотреть онлайн | Youix.com',
|
||||||
'age_limit': 0,
|
|
||||||
'display_id': 'leningrad-zoj',
|
'display_id': 'leningrad-zoj',
|
||||||
'thumbnail': r're:https?://youix\.com/contents/videos_screenshots/.+\.jpg',
|
'thumbnail': r're:https?://youix\.com/contents/videos_screenshots/.+\.jpg',
|
||||||
},
|
},
|
||||||
@@ -419,7 +412,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': '105',
|
'id': '105',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Kelis - 4th Of July / Embed Player',
|
'title': 'Kelis - 4th Of July / Embed Player',
|
||||||
'age_limit': 0,
|
|
||||||
'display_id': 'kelis-4th-of-july',
|
'display_id': 'kelis-4th-of-july',
|
||||||
'thumbnail': r're:https?://www\.kvs-demo\.com/contents/videos_screenshots/.+\.jpg',
|
'thumbnail': r're:https?://www\.kvs-demo\.com/contents/videos_screenshots/.+\.jpg',
|
||||||
},
|
},
|
||||||
@@ -430,9 +422,8 @@ class GenericIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'beltzlaw-1',
|
'id': 'beltzlaw-1',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Beltz Law Group | Dallas Traffic Ticket, Accident & Criminal Attorney (1)',
|
'title': str,
|
||||||
'age_limit': 0,
|
'description': str,
|
||||||
'description': 'md5:5bdf23fcb76801dc3b31e74cabf82147',
|
|
||||||
'thumbnail': r're:https?://beltzlaw\.com/wp-content/uploads/.+\.jpg',
|
'thumbnail': r're:https?://beltzlaw\.com/wp-content/uploads/.+\.jpg',
|
||||||
'timestamp': int, # varies
|
'timestamp': int, # varies
|
||||||
'upload_date': str,
|
'upload_date': str,
|
||||||
@@ -447,7 +438,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'cine-1',
|
'id': 'cine-1',
|
||||||
'ext': 'webm',
|
'ext': 'webm',
|
||||||
'title': 'CINE.AR (1)',
|
'title': 'CINE.AR (1)',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:a4e58f9e2291c940e485f34251898c4a',
|
'description': 'md5:a4e58f9e2291c940e485f34251898c4a',
|
||||||
'thumbnail': r're:https?://cine\.ar/img/.+\.png',
|
'thumbnail': r're:https?://cine\.ar/img/.+\.png',
|
||||||
'_old_archive_ids': ['generic cine'],
|
'_old_archive_ids': ['generic cine'],
|
||||||
@@ -461,7 +451,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'ipy2AcGL',
|
'id': 'ipy2AcGL',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Hoe een bladvlo dit verwoestende Japanse onkruid moet vernietigen',
|
'title': 'Hoe een bladvlo dit verwoestende Japanse onkruid moet vernietigen',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:6a9d644bab0dc2dc06849c2505d8383d',
|
'description': 'md5:6a9d644bab0dc2dc06849c2505d8383d',
|
||||||
'duration': 111.0,
|
'duration': 111.0,
|
||||||
'thumbnail': r're:https?://images\.nu\.nl/.+\.jpg',
|
'thumbnail': r're:https?://images\.nu\.nl/.+\.jpg',
|
||||||
@@ -477,7 +466,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'porsche-911-gt3-rs-rij-impressie-2',
|
'id': 'porsche-911-gt3-rs-rij-impressie-2',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Test: Porsche 911 GT3 RS - AutoWeek',
|
'title': 'Test: Porsche 911 GT3 RS - AutoWeek',
|
||||||
'age_limit': 0,
|
|
||||||
'description': 'md5:a17b5bd84288448d8f11b838505718fc',
|
'description': 'md5:a17b5bd84288448d8f11b838505718fc',
|
||||||
'direct': True,
|
'direct': True,
|
||||||
'thumbnail': r're:https?://images\.autoweek\.nl/.+',
|
'thumbnail': r're:https?://images\.autoweek\.nl/.+',
|
||||||
@@ -493,7 +481,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'k6gl2kt2eq',
|
'id': 'k6gl2kt2eq',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Breezy HR\'s ATS helps you find & hire employees sooner',
|
'title': 'Breezy HR\'s ATS helps you find & hire employees sooner',
|
||||||
'age_limit': 0,
|
|
||||||
'average_rating': 4.5,
|
'average_rating': 4.5,
|
||||||
'description': 'md5:eee75fdd3044c538003f3be327ba01e1',
|
'description': 'md5:eee75fdd3044c538003f3be327ba01e1',
|
||||||
'duration': 60.1,
|
'duration': 60.1,
|
||||||
@@ -509,7 +496,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'id': 'videojs_hls_test',
|
'id': 'videojs_hls_test',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'video',
|
'title': 'video',
|
||||||
'age_limit': 0,
|
|
||||||
'duration': 1800,
|
'duration': 1800,
|
||||||
},
|
},
|
||||||
'params': {'skip_download': 'm3u8'},
|
'params': {'skip_download': 'm3u8'},
|
||||||
@@ -786,8 +772,8 @@ class GenericIE(InfoExtractor):
|
|||||||
|
|
||||||
if default_search in ('auto', 'auto_warning', 'fixup_error'):
|
if default_search in ('auto', 'auto_warning', 'fixup_error'):
|
||||||
if re.match(r'[^\s/]+\.[^\s/]+/', url):
|
if re.match(r'[^\s/]+\.[^\s/]+/', url):
|
||||||
self.report_warning('The url doesn\'t specify the protocol, trying with http')
|
self.report_warning('The url doesn\'t specify the protocol, trying with https')
|
||||||
return self.url_result('http://' + url)
|
return self.url_result('https://' + url)
|
||||||
elif default_search != 'fixup_error':
|
elif default_search != 'fixup_error':
|
||||||
if default_search == 'auto_warning':
|
if default_search == 'auto_warning':
|
||||||
if re.match(r'^(?:url|URL)$', url):
|
if re.match(r'^(?:url|URL)$', url):
|
||||||
@@ -800,9 +786,7 @@ class GenericIE(InfoExtractor):
|
|||||||
return self.url_result('ytsearch:' + url)
|
return self.url_result('ytsearch:' + url)
|
||||||
|
|
||||||
if default_search in ('error', 'fixup_error'):
|
if default_search in ('error', 'fixup_error'):
|
||||||
raise ExtractorError(
|
raise ExtractorError(f'{url!r} is not a valid URL', expected=True)
|
||||||
f'{url!r} is not a valid URL. '
|
|
||||||
f'Set --default-search "ytsearch" (or run yt-dlp "ytsearch:{url}" ) to search YouTube', expected=True)
|
|
||||||
else:
|
else:
|
||||||
if ':' not in default_search:
|
if ':' not in default_search:
|
||||||
default_search += ':'
|
default_search += ':'
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ from ..utils import (
|
|||||||
get_element_html_by_id,
|
get_element_html_by_id,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
lowercase_escape,
|
lowercase_escape,
|
||||||
|
parse_qs,
|
||||||
try_get,
|
try_get,
|
||||||
update_url_query,
|
update_url_query,
|
||||||
)
|
)
|
||||||
@@ -111,14 +112,18 @@ class GoogleDriveIE(InfoExtractor):
|
|||||||
self._caption_formats_ext.append(f.attrib['fmt_code'])
|
self._caption_formats_ext.append(f.attrib['fmt_code'])
|
||||||
|
|
||||||
def _get_captions_by_type(self, video_id, subtitles_id, caption_type,
|
def _get_captions_by_type(self, video_id, subtitles_id, caption_type,
|
||||||
origin_lang_code=None):
|
origin_lang_code=None, origin_lang_name=None):
|
||||||
if not subtitles_id or not caption_type:
|
if not subtitles_id or not caption_type:
|
||||||
return
|
return
|
||||||
captions = {}
|
captions = {}
|
||||||
for caption_entry in self._captions_xml.findall(
|
for caption_entry in self._captions_xml.findall(
|
||||||
self._CAPTIONS_ENTRY_TAG[caption_type]):
|
self._CAPTIONS_ENTRY_TAG[caption_type]):
|
||||||
caption_lang_code = caption_entry.attrib.get('lang_code')
|
caption_lang_code = caption_entry.attrib.get('lang_code')
|
||||||
if not caption_lang_code:
|
caption_name = caption_entry.attrib.get('name') or origin_lang_name
|
||||||
|
if not caption_lang_code or not caption_name:
|
||||||
|
self.report_warning(f'Missing necessary caption metadata. '
|
||||||
|
f'Need lang_code and name attributes. '
|
||||||
|
f'Found: {caption_entry.attrib}')
|
||||||
continue
|
continue
|
||||||
caption_format_data = []
|
caption_format_data = []
|
||||||
for caption_format in self._caption_formats_ext:
|
for caption_format in self._caption_formats_ext:
|
||||||
@@ -129,7 +134,7 @@ class GoogleDriveIE(InfoExtractor):
|
|||||||
'lang': (caption_lang_code if origin_lang_code is None
|
'lang': (caption_lang_code if origin_lang_code is None
|
||||||
else origin_lang_code),
|
else origin_lang_code),
|
||||||
'type': 'track',
|
'type': 'track',
|
||||||
'name': '',
|
'name': caption_name,
|
||||||
'kind': '',
|
'kind': '',
|
||||||
}
|
}
|
||||||
if origin_lang_code is not None:
|
if origin_lang_code is not None:
|
||||||
@@ -155,14 +160,15 @@ class GoogleDriveIE(InfoExtractor):
|
|||||||
self._download_subtitles_xml(video_id, subtitles_id, hl)
|
self._download_subtitles_xml(video_id, subtitles_id, hl)
|
||||||
if not self._captions_xml:
|
if not self._captions_xml:
|
||||||
return
|
return
|
||||||
track = self._captions_xml.find('track')
|
track = next((t for t in self._captions_xml.findall('track') if t.attrib.get('cantran') == 'true'), None)
|
||||||
if track is None:
|
if track is None:
|
||||||
return
|
return
|
||||||
origin_lang_code = track.attrib.get('lang_code')
|
origin_lang_code = track.attrib.get('lang_code')
|
||||||
if not origin_lang_code:
|
origin_lang_name = track.attrib.get('name')
|
||||||
|
if not origin_lang_code or not origin_lang_name:
|
||||||
return
|
return
|
||||||
return self._get_captions_by_type(
|
return self._get_captions_by_type(
|
||||||
video_id, subtitles_id, 'automatic_captions', origin_lang_code)
|
video_id, subtitles_id, 'automatic_captions', origin_lang_code, origin_lang_name)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
@@ -268,10 +274,8 @@ class GoogleDriveIE(InfoExtractor):
|
|||||||
subtitles_id = None
|
subtitles_id = None
|
||||||
ttsurl = get_value('ttsurl')
|
ttsurl = get_value('ttsurl')
|
||||||
if ttsurl:
|
if ttsurl:
|
||||||
# the video Id for subtitles will be the last value in the ttsurl
|
# the subtitles ID is the vid param of the ttsurl query
|
||||||
# query string
|
subtitles_id = parse_qs(ttsurl).get('vid', [None])[-1]
|
||||||
subtitles_id = ttsurl.encode().decode(
|
|
||||||
'unicode_escape').split('=')[-1]
|
|
||||||
|
|
||||||
self.cookiejar.clear(domain='.google.com', path='/', name='NID')
|
self.cookiejar.clear(domain='.google.com', path='/', name='NID')
|
||||||
|
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ from ..utils import (
|
|||||||
url_or_none,
|
url_or_none,
|
||||||
urljoin,
|
urljoin,
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class ITVIE(InfoExtractor):
|
class ITVIE(InfoExtractor):
|
||||||
@@ -223,6 +224,7 @@ class ITVBTCCIE(InfoExtractor):
|
|||||||
},
|
},
|
||||||
'playlist_count': 12,
|
'playlist_count': 12,
|
||||||
}, {
|
}, {
|
||||||
|
# news page, can have absent `data` field
|
||||||
'url': 'https://www.itv.com/news/2021-10-27/i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike',
|
'url': 'https://www.itv.com/news/2021-10-27/i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike',
|
'id': 'i-have-to-protect-the-country-says-rishi-sunak-as-uk-faces-interest-rate-hike',
|
||||||
@@ -243,7 +245,7 @@ class ITVBTCCIE(InfoExtractor):
|
|||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
for video in json_map:
|
for video in json_map:
|
||||||
if not any(video['data'].get(attr) == 'Brightcove' for attr in ('name', 'type')):
|
if not any(traverse_obj(video, ('data', attr)) == 'Brightcove' for attr in ('name', 'type')):
|
||||||
continue
|
continue
|
||||||
video_id = video['data']['id']
|
video_id = video['data']['id']
|
||||||
account_id = video['data']['accountId']
|
account_id = video['data']['accountId']
|
||||||
|
|||||||
@@ -95,26 +95,47 @@ class KickVODIE(KickBaseIE):
|
|||||||
IE_NAME = 'kick:vod'
|
IE_NAME = 'kick:vod'
|
||||||
_VALID_URL = r'https?://(?:www\.)?kick\.com/[\w-]+/videos/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
|
_VALID_URL = r'https?://(?:www\.)?kick\.com/[\w-]+/videos/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://kick.com/xqc/videos/8dd97a8d-e17f-48fb-8bc3-565f88dbc9ea',
|
# Regular VOD
|
||||||
'md5': '3870f94153e40e7121a6e46c068b70cb',
|
'url': 'https://kick.com/xqc/videos/5c697a87-afce-4256-b01f-3c8fe71ef5cb',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '8dd97a8d-e17f-48fb-8bc3-565f88dbc9ea',
|
'id': '5c697a87-afce-4256-b01f-3c8fe71ef5cb',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': '18+ #ad 🛑LIVE🛑CLICK🛑DRAMA🛑NEWS🛑STUFF🛑REACT🛑GET IN HHERE🛑BOP BOP🛑WEEEE WOOOO🛑',
|
'title': '🐗LIVE🐗CLICK🐗HERE🐗DRAMA🐗ALL DAY🐗NEWS🐗VIDEOS🐗CLIPS🐗GAMES🐗STUFF🐗WOW🐗IM HERE🐗LETS GO🐗COOL🐗VERY NICE🐗',
|
||||||
'description': 'THE BEST AT ABSOLUTELY EVERYTHING. THE JUICER. LEADER OF THE JUICERS.',
|
'description': 'THE BEST AT ABSOLUTELY EVERYTHING. THE JUICER. LEADER OF THE JUICERS.',
|
||||||
'channel': 'xqc',
|
|
||||||
'channel_id': '668',
|
|
||||||
'uploader': 'xQc',
|
'uploader': 'xQc',
|
||||||
'uploader_id': '676',
|
'uploader_id': '676',
|
||||||
'upload_date': '20240909',
|
'channel': 'xqc',
|
||||||
'timestamp': 1725919141,
|
'channel_id': '668',
|
||||||
'duration': 10155.0,
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg',
|
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'categories': ['Just Chatting'],
|
'age_limit': 18,
|
||||||
'age_limit': 0,
|
'duration': 22278.0,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg',
|
||||||
|
'categories': ['Deadlock'],
|
||||||
|
'timestamp': 1756082443,
|
||||||
|
'upload_date': '20250825',
|
||||||
},
|
},
|
||||||
'params': {'skip_download': 'm3u8'},
|
'params': {'skip_download': 'm3u8'},
|
||||||
|
}, {
|
||||||
|
# VOD of ongoing livestream (at the time of writing the test, ID rotates every two days)
|
||||||
|
'url': 'https://kick.com/a-log-burner/videos/5230df84-ea38-46e1-be4f-f5949ae55641',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '5230df84-ea38-46e1-be4f-f5949ae55641',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': r're:😴 Cozy Fireplace ASMR 🔥 | Relax, Focus, Sleep 💤',
|
||||||
|
'description': 'md5:080bc713eac0321a7b376a1b53816d1b',
|
||||||
|
'uploader': 'A_Log_Burner',
|
||||||
|
'uploader_id': '65114691',
|
||||||
|
'channel': 'a-log-burner',
|
||||||
|
'channel_id': '63967687',
|
||||||
|
'view_count': int,
|
||||||
|
'age_limit': 18,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg',
|
||||||
|
'categories': ['Other, Watch Party'],
|
||||||
|
'timestamp': int,
|
||||||
|
'upload_date': str,
|
||||||
|
'live_status': 'is_live',
|
||||||
|
},
|
||||||
|
'skip': 'live',
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@@ -137,6 +158,7 @@ class KickVODIE(KickBaseIE):
|
|||||||
'categories': ('livestream', 'categories', ..., 'name', {str}),
|
'categories': ('livestream', 'categories', ..., 'name', {str}),
|
||||||
'view_count': ('views', {int_or_none}),
|
'view_count': ('views', {int_or_none}),
|
||||||
'age_limit': ('livestream', 'is_mature', {bool}, {lambda x: 18 if x else 0}),
|
'age_limit': ('livestream', 'is_mature', {bool}, {lambda x: 18 if x else 0}),
|
||||||
|
'is_live': ('livestream', 'is_live', {bool}),
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
from .arkena import ArkenaIE
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
class LcpPlayIE(ArkenaIE): # XXX: Do not subclass from concrete IE
|
class LcpPlayIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://play\.lcp\.fr/embed/(?P<id>[^/]+)/(?P<account_id>[^/]+)/[^/]+/[^/]+'
|
_VALID_URL = r'https?://play\.lcp\.fr/embed/(?P<id>[^/]+)/(?P<account_id>[^/]+)/[^/]+/[^/]+'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://play.lcp.fr/embed/327336/131064/darkmatter/0',
|
'url': 'http://play.lcp.fr/embed/327336/131064/darkmatter/0',
|
||||||
@@ -21,24 +21,9 @@ class LcpPlayIE(ArkenaIE): # XXX: Do not subclass from concrete IE
|
|||||||
|
|
||||||
|
|
||||||
class LcpIE(InfoExtractor):
|
class LcpIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?lcp\.fr/(?:[^/]+/)*(?P<id>[^/]+)'
|
_VALID_URL = r'https?://(?:www\.)?lcp\.fr/(?:[^/]+/)*(?P<id>[^/]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# arkena embed
|
|
||||||
'url': 'http://www.lcp.fr/la-politique-en-video/schwartzenberg-prg-preconise-francois-hollande-de-participer-une-primaire',
|
|
||||||
'md5': 'b8bd9298542929c06c1c15788b1f277a',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'd56d03e9',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Schwartzenberg (PRG) préconise à François Hollande de participer à une primaire à gauche',
|
|
||||||
'description': 'md5:96ad55009548da9dea19f4120c6c16a8',
|
|
||||||
'timestamp': 1456488895,
|
|
||||||
'upload_date': '20160226',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
# dailymotion live stream
|
# dailymotion live stream
|
||||||
'url': 'http://www.lcp.fr/le-direct',
|
'url': 'http://www.lcp.fr/le-direct',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ class LocoIE(InfoExtractor):
|
|||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://loco.com/stream/c64916eb-10fb-46a9-9a19-8c4b7ed064e7',
|
'url': 'https://loco.com/stream/c64916eb-10fb-46a9-9a19-8c4b7ed064e7',
|
||||||
'md5': '45ebc8a47ee1c2240178757caf8881b5',
|
'md5': '8b9bda03eba4d066928ae8d71f19befb',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'c64916eb-10fb-46a9-9a19-8c4b7ed064e7',
|
'id': 'c64916eb-10fb-46a9-9a19-8c4b7ed064e7',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
@@ -55,9 +55,9 @@ class LocoIE(InfoExtractor):
|
|||||||
'tags': ['Gameplay'],
|
'tags': ['Gameplay'],
|
||||||
'series': 'GTA 5',
|
'series': 'GTA 5',
|
||||||
'timestamp': 1740612872,
|
'timestamp': 1740612872,
|
||||||
'modified_timestamp': 1740613037,
|
'modified_timestamp': 1750948439,
|
||||||
'upload_date': '20250226',
|
'upload_date': '20250226',
|
||||||
'modified_date': '20250226',
|
'modified_date': '20250626',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
# Requires video authorization
|
# Requires video authorization
|
||||||
@@ -123,8 +123,8 @@ class LocoIE(InfoExtractor):
|
|||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_type, video_id = self._match_valid_url(url).group('type', 'id')
|
video_type, video_id = self._match_valid_url(url).group('type', 'id')
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
stream = traverse_obj(self._search_nextjs_data(webpage, video_id), (
|
stream = traverse_obj(self._search_nextjs_v13_data(webpage, video_id), (
|
||||||
'props', 'pageProps', ('liveStreamData', 'stream', 'liveStream'), {dict}, any, {require('stream info')}))
|
..., (None, 'ssrData'), ('liveStreamData', 'stream', 'liveStream'), {dict}, any, {require('stream info')}))
|
||||||
|
|
||||||
if access_token := self._get_access_token(video_id):
|
if access_token := self._get_access_token(video_id):
|
||||||
self._request_webpage(
|
self._request_webpage(
|
||||||
|
|||||||
@@ -1,22 +1,14 @@
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
clean_html,
|
clean_html,
|
||||||
merge_dicts,
|
|
||||||
traverse_obj,
|
|
||||||
unified_timestamp,
|
unified_timestamp,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
urljoin,
|
urljoin,
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class LRTBaseIE(InfoExtractor):
|
class LRTStreamIE(InfoExtractor):
|
||||||
def _extract_js_var(self, webpage, var_name, default=None):
|
|
||||||
return self._search_regex(
|
|
||||||
fr'{var_name}\s*=\s*(["\'])((?:(?!\1).)+)\1',
|
|
||||||
webpage, var_name.replace('_', ' '), default, group=2)
|
|
||||||
|
|
||||||
|
|
||||||
class LRTStreamIE(LRTBaseIE):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/(?P<id>[\w-]+)'
|
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/(?P<id>[\w-]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.lrt.lt/mediateka/tiesiogiai/lrt-opus',
|
'url': 'https://www.lrt.lt/mediateka/tiesiogiai/lrt-opus',
|
||||||
@@ -31,86 +23,110 @@ class LRTStreamIE(LRTBaseIE):
|
|||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
streams_data = self._download_json(self._extract_js_var(webpage, 'tokenURL'), video_id)
|
|
||||||
|
# TODO: Use _search_nextjs_v13_data once fixed
|
||||||
|
get_stream_url = self._search_regex(
|
||||||
|
r'\\"get_streams_url\\":\\"([^"]+)\\"', webpage, 'stream URL')
|
||||||
|
streams_data = self._download_json(get_stream_url, video_id)
|
||||||
|
|
||||||
formats, subtitles = [], {}
|
formats, subtitles = [], {}
|
||||||
for stream_url in traverse_obj(streams_data, (
|
for stream_url in traverse_obj(streams_data, (
|
||||||
'response', 'data', lambda k, _: k.startswith('content')), expected_type=url_or_none):
|
'response', 'data', lambda k, _: k.startswith('content'), {url_or_none})):
|
||||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(stream_url, video_id, 'mp4', m3u8_id='hls', live=True)
|
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||||
|
stream_url, video_id, 'mp4', m3u8_id='hls', live=True)
|
||||||
formats.extend(fmts)
|
formats.extend(fmts)
|
||||||
subtitles = self._merge_subtitles(subtitles, subs)
|
subtitles = self._merge_subtitles(subtitles, subs)
|
||||||
|
|
||||||
stream_title = self._extract_js_var(webpage, 'video_title', 'LRT')
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'is_live': True,
|
'is_live': True,
|
||||||
'title': f'{self._og_search_title(webpage)} - {stream_title}',
|
'title': self._og_search_title(webpage),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class LRTVODIE(LRTBaseIE):
|
class LRTVODIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))'
|
_VALID_URL = [
|
||||||
|
r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)',
|
||||||
|
r'https?://(?:(?:www|archyvai)\.)?lrt\.lt/mediateka/video/[^?#]+\?(?:[^#]*&)?episode=(?P<id>[0-9]+)',
|
||||||
|
]
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# m3u8 download
|
# m3u8 download
|
||||||
'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
|
'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2000127261',
|
'id': '2000127261',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė',
|
'title': 'Nustebinkite svečius klasikiniu makaronų su baklažanais receptu',
|
||||||
'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
|
'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
|
||||||
'duration': 3035,
|
'timestamp': 1604086200,
|
||||||
'timestamp': 1604079000,
|
|
||||||
'upload_date': '20201030',
|
'upload_date': '20201030',
|
||||||
'tags': ['LRT TELEVIZIJA', 'Beatos virtuvė', 'Beata Nicholson', 'Makaronai', 'Baklažanai', 'Vakarienė', 'Receptas'],
|
'tags': ['LRT TELEVIZIJA', 'Beatos virtuvė', 'Beata Nicholson', 'Makaronai', 'Baklažanai', 'Vakarienė', 'Receptas'],
|
||||||
'thumbnail': 'https://www.lrt.lt/img/2020/10/30/764041-126478-1287x836.jpg',
|
'thumbnail': 'https://www.lrt.lt/img/2020/10/30/764041-126478-1287x836.jpg',
|
||||||
|
'channel': 'Beatos virtuvė',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
# direct mp3 download
|
# audio download
|
||||||
'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/',
|
'url': 'https://www.lrt.lt/mediateka/irasas/1013074524/kita-tema',
|
||||||
'md5': '389da8ca3cad0f51d12bed0c844f6a0a',
|
'md5': 'fc982f10274929c66fdff65f75615cb0',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1013074524',
|
'id': '1013074524',
|
||||||
'ext': 'mp3',
|
'ext': 'mp4',
|
||||||
'title': 'Kita tema 2016-09-05 15:05',
|
'title': 'Kita tema',
|
||||||
'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
|
'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
|
||||||
'duration': 3008,
|
'channel': 'Kita tema',
|
||||||
'view_count': int,
|
'timestamp': 1473087900,
|
||||||
'like_count': int,
|
'upload_date': '20160905',
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.lrt.lt/mediateka/video/auksinis-protas-vasara?episode=2000420320&season=%2Fmediateka%2Fvideo%2Fauksinis-protas-vasara%2F2025',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2000420320',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Kuris senovės romėnų poetas aprašė Narcizo mitą?',
|
||||||
|
'description': 'Intelektinė viktorina. Ved. Arūnas Valinskas ir Andrius Tapinas.',
|
||||||
|
'channel': 'Auksinis protas. Vasara',
|
||||||
|
'thumbnail': 'https://www.lrt.lt/img/2025/06/09/2094343-987905-1287x836.jpg',
|
||||||
|
'tags': ['LRT TELEVIZIJA', 'Auksinis protas'],
|
||||||
|
'timestamp': 1749851040,
|
||||||
|
'upload_date': '20250613',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://archyvai.lrt.lt/mediateka/video/ziniu-riteriai-ir-damos?episode=49685&season=%2Fmediateka%2Fvideo%2Fziniu-riteriai-ir-damos%2F2013',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://archyvai.lrt.lt/mediateka/irasas/2000077058/panorama-1989-baltijos-kelias',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
path, video_id = self._match_valid_url(url).group('path', 'id')
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
media_url = self._extract_js_var(webpage, 'main_url', path)
|
# TODO: Use _search_nextjs_v13_data once fixed
|
||||||
media = self._download_json(self._extract_js_var(
|
canonical_url = (
|
||||||
webpage, 'media_info_url',
|
self._search_regex(r'\\"(?:article|data)\\":{[^}]*\\"url\\":\\"(/[^"]+)\\"', webpage, 'content URL', fatal=False)
|
||||||
'https://www.lrt.lt/servisai/stream_url/vod/media_info/'),
|
or self._search_regex(r'<link\s+rel="canonical"\s*href="(/[^"]+)"', webpage, 'canonical URL'))
|
||||||
video_id, query={'url': media_url})
|
|
||||||
|
media = self._download_json(
|
||||||
|
'https://www.lrt.lt/servisai/stream_url/vod/media_info/',
|
||||||
|
video_id, query={'url': canonical_url})
|
||||||
jw_data = self._parse_jwplayer_data(
|
jw_data = self._parse_jwplayer_data(
|
||||||
media['playlist_item'], video_id, base_url=url)
|
media['playlist_item'], video_id, base_url=url)
|
||||||
|
|
||||||
json_ld_data = self._search_json_ld(webpage, video_id)
|
return {
|
||||||
|
**jw_data,
|
||||||
tags = []
|
**traverse_obj(media, {
|
||||||
for tag in (media.get('tags') or []):
|
'id': ('id', {str}),
|
||||||
tag_name = tag.get('name')
|
'title': ('title', {str}),
|
||||||
if not tag_name:
|
'description': ('content', {clean_html}),
|
||||||
continue
|
'timestamp': ('date', {lambda x: x.replace('.', '/')}, {unified_timestamp}),
|
||||||
tags.append(tag_name)
|
'tags': ('tags', ..., 'name', {str}),
|
||||||
|
}),
|
||||||
clean_info = {
|
|
||||||
'description': clean_html(media.get('content')),
|
|
||||||
'tags': tags,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return merge_dicts(clean_info, jw_data, json_ld_data)
|
|
||||||
|
|
||||||
|
class LRTRadioIE(InfoExtractor):
|
||||||
class LRTRadioIE(LRTBaseIE):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/radioteka/irasas/(?P<id>\d+)/(?P<path>[^?#/]+)'
|
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/radioteka/irasas/(?P<id>\d+)/(?P<path>[^?#/]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# m3u8 download
|
# m3u8 download
|
||||||
|
|||||||
@@ -11,121 +11,65 @@ from ..utils import (
|
|||||||
|
|
||||||
class MediaKlikkIE(InfoExtractor):
|
class MediaKlikkIE(InfoExtractor):
|
||||||
_VALID_URL = r'''(?x)https?://(?:www\.)?
|
_VALID_URL = r'''(?x)https?://(?:www\.)?
|
||||||
(?:mediaklikk|m4sport|hirado|petofilive)\.hu/.*?(?:videok?|cikk)/
|
(?:mediaklikk|m4sport|hirado)\.hu/.*?(?:videok?|cikk)/
|
||||||
(?:(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/)?
|
(?:(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/)?
|
||||||
(?P<id>[^/#?_]+)'''
|
(?P<id>[^/#?_]+)'''
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://mediaklikk.hu/filmajanlo/cikk/az-ajto/',
|
# mediaklikk
|
||||||
|
'url': 'https://mediaklikk.hu/ajanlo/video/2025/08/04/heviz-dzsungel-a-viz-alatt-ajanlo-08-10/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '668177',
|
'id': '8573769',
|
||||||
'title': 'Az ajtó',
|
'title': 'Hévíz - dzsungel a víz alatt – Ajánló (08.10.)',
|
||||||
'display_id': 'az-ajto',
|
'display_id': 'heviz-dzsungel-a-viz-alatt-ajanlo-08-10',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2016/01/vlcsnap-2023-07-31-14h18m52s111.jpg',
|
'upload_date': '20250804',
|
||||||
|
'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/vlcsnap-2025-08-04-13h48m24s336.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
# (old) mediaklikk. date in html.
|
# mediaklikk - date in html
|
||||||
'url': 'https://mediaklikk.hu/video/hazajaro-delnyugat-bacska-a-duna-menten-palankatol-doroszloig/',
|
'url': 'https://mediaklikk.hu/video/hazajaro-bilo-hegyseg-verocei-barangolas-a-drava-menten/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '4754129',
|
'id': '8482167',
|
||||||
'title': 'Hazajáró, DÉLNYUGAT-BÁCSKA – A Duna mentén Palánkától Doroszlóig',
|
'title': 'Hazajáró, Bilo-hegység - Verőcei barangolás a Dráva mentén',
|
||||||
|
'display_id': 'hazajaro-bilo-hegyseg-verocei-barangolas-a-drava-menten',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'upload_date': '20210901',
|
'upload_date': '20250703',
|
||||||
'thumbnail': 'http://mediaklikk.hu/wp-content/uploads/sites/4/2014/02/hazajarouj_JO.jpg',
|
'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/07/2024-000307-M0010-01_3700_cover_01.jpg',
|
||||||
},
|
},
|
||||||
'skip': 'Webpage redirects to 404 page',
|
|
||||||
}, {
|
|
||||||
# mediaklikk. date in html.
|
|
||||||
'url': 'https://mediaklikk.hu/video/hazajaro-fabova-hegyseg-kishont-koronaja/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6696133',
|
|
||||||
'title': 'Hazajáró, Fabova-hegység - Kishont koronája',
|
|
||||||
'display_id': 'hazajaro-fabova-hegyseg-kishont-koronaja',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'upload_date': '20230903',
|
|
||||||
'thumbnail': 'https://mediaklikk.hu/wp-content/uploads/sites/4/2014/02/hazajarouj_JO.jpg',
|
|
||||||
},
|
|
||||||
'skip': 'Webpage redirects to 404 page',
|
|
||||||
}, {
|
|
||||||
# (old) m4sport
|
|
||||||
'url': 'https://m4sport.hu/video/2021/08/30/gyemant-liga-parizs/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '4754999',
|
|
||||||
'title': 'Gyémánt Liga, Párizs',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'upload_date': '20210830',
|
|
||||||
'thumbnail': 'http://m4sport.hu/wp-content/uploads/sites/4/2021/08/vlcsnap-2021-08-30-18h21m20s10-1024x576.jpg',
|
|
||||||
},
|
|
||||||
'skip': 'Webpage redirects to 404 page',
|
|
||||||
}, {
|
}, {
|
||||||
# m4sport
|
# m4sport
|
||||||
'url': 'https://m4sport.hu/sportkozvetitesek/video/2023/09/08/atletika-gyemant-liga-brusszel/',
|
'url': 'https://m4sport.hu/video/2025/08/07/holnap-kezdodik-a-12-vilagjatekok/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '6711136',
|
'id': '8581887',
|
||||||
'title': 'Atlétika – Gyémánt Liga, Brüsszel',
|
'title': 'Holnap kezdődik a 12. Világjátékok',
|
||||||
'display_id': 'atletika-gyemant-liga-brusszel',
|
'display_id': 'holnap-kezdodik-a-12-vilagjatekok',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'upload_date': '20230908',
|
'upload_date': '20250807',
|
||||||
'thumbnail': 'https://m4sport.hu/wp-content/uploads/sites/4/2023/09/vlcsnap-2023-09-08-22h43m18s691.jpg',
|
'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/vlcsnap-2025-08-06-20h30m48s817.jpg',
|
||||||
},
|
},
|
||||||
'skip': 'Webpage redirects to 404 page',
|
|
||||||
}, {
|
|
||||||
# m4sport with *video/ url and no date
|
|
||||||
'url': 'https://m4sport.hu/bl-video/real-madrid-chelsea-1-1/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '4492099',
|
|
||||||
'title': 'Real Madrid - Chelsea 1-1',
|
|
||||||
'display_id': 'real-madrid-chelsea-1-1',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'thumbnail': 'https://m4sport.hu/wp-content/uploads/sites/4/2021/04/Sequence-01.Still001-1024x576.png',
|
|
||||||
},
|
|
||||||
'skip': 'Webpage redirects to 404 page',
|
|
||||||
}, {
|
|
||||||
# (old) hirado
|
|
||||||
'url': 'https://hirado.hu/videok/felteteleket-szabott-a-fovaros/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '4760120',
|
|
||||||
'title': 'Feltételeket szabott a főváros',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'thumbnail': 'http://hirado.hu/wp-content/uploads/sites/4/2021/09/vlcsnap-2021-09-01-20h20m37s165.jpg',
|
|
||||||
},
|
|
||||||
'skip': 'Webpage redirects to video list page',
|
|
||||||
}, {
|
}, {
|
||||||
# hirado
|
# hirado
|
||||||
'url': 'https://hirado.hu/belfold/video/2023/09/11/marad-az-eves-elszamolas-a-napelemekre-beruhazo-csaladoknal',
|
'url': 'https://hirado.hu/video/2025/08/09/idojaras-jelentes-2025-augusztus-9-2230',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '6716068',
|
'id': '8592033',
|
||||||
'title': 'Marad az éves elszámolás a napelemekre beruházó családoknál',
|
'title': 'Időjárás-jelentés, 2025. augusztus 9. 22:30',
|
||||||
'display_id': 'marad-az-eves-elszamolas-a-napelemekre-beruhazo-csaladoknal',
|
'display_id': 'idojaras-jelentes-2025-augusztus-9-2230',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'upload_date': '20230911',
|
'upload_date': '20250809',
|
||||||
'thumbnail': 'https://hirado.hu/wp-content/uploads/sites/4/2023/09/vlcsnap-2023-09-11-09h16m09s882.jpg',
|
'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/Idojaras-jelentes-35-1.jpg',
|
||||||
},
|
},
|
||||||
'skip': 'Webpage redirects to video list page',
|
|
||||||
}, {
|
}, {
|
||||||
# (old) petofilive
|
# hirado - subcategory
|
||||||
'url': 'https://petofilive.hu/video/2021/06/07/tha-shudras-az-akusztikban/',
|
'url': 'https://hirado.hu/belfold/video/2025/08/09/nyitott-porta-napok-2025/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '4571948',
|
'id': '8590581',
|
||||||
'title': 'Tha Shudras az Akusztikban',
|
'title': 'Nyitott Porta Napok 2025',
|
||||||
|
'display_id': 'nyitott-porta-napok-2025',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'upload_date': '20210607',
|
'upload_date': '20250809',
|
||||||
'thumbnail': 'http://petofilive.hu/wp-content/uploads/sites/4/2021/06/vlcsnap-2021-06-07-22h14m23s915-1024x576.jpg',
|
'thumbnail': 'https://cdn.cms.mtv.hu/wp-content/uploads/sites/4/2025/08/vlcsnap-2025-08-09-10h35m01s887.jpg',
|
||||||
},
|
},
|
||||||
'skip': 'Webpage redirects to empty page',
|
|
||||||
}, {
|
|
||||||
# petofilive
|
|
||||||
'url': 'https://petofilive.hu/video/2023/09/09/futball-fesztival-a-margitszigeten/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6713233',
|
|
||||||
'title': 'Futball Fesztivál a Margitszigeten',
|
|
||||||
'display_id': 'futball-fesztival-a-margitszigeten',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'upload_date': '20230909',
|
|
||||||
'thumbnail': 'https://petofilive.hu/wp-content/uploads/sites/4/2023/09/Clipboard11-2.jpg',
|
|
||||||
},
|
|
||||||
'skip': 'Webpage redirects to video list page',
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@@ -133,9 +77,8 @@ class MediaKlikkIE(InfoExtractor):
|
|||||||
display_id = mobj.group('id')
|
display_id = mobj.group('id')
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
player_data_str = self._html_search_regex(
|
player_data = self._search_json(
|
||||||
r'mtva_player_manager\.player\(document.getElementById\(.*\),\s?(\{.*\}).*\);', webpage, 'player data')
|
r'loadPlayer\((?:\s*["\'][^"\']+["\']\s*,)?', webpage, 'player data', mobj)
|
||||||
player_data = self._parse_json(player_data_str, display_id, urllib.parse.unquote)
|
|
||||||
video_id = str(player_data['contentId'])
|
video_id = str(player_data['contentId'])
|
||||||
title = player_data.get('title') or self._og_search_title(webpage, fatal=False) or \
|
title = player_data.get('title') or self._og_search_title(webpage, fatal=False) or \
|
||||||
self._html_search_regex(r'<h\d+\b[^>]+\bclass="article_title">([^<]+)<', webpage, 'title')
|
self._html_search_regex(r'<h\d+\b[^>]+\bclass="article_title">([^<]+)<', webpage, 'title')
|
||||||
@@ -146,7 +89,7 @@ class MediaKlikkIE(InfoExtractor):
|
|||||||
upload_date = unified_strdate(self._html_search_regex(
|
upload_date = unified_strdate(self._html_search_regex(
|
||||||
r'<p+\b[^>]+\bclass="article_date">([^<]+)<', webpage, 'upload date', default=None))
|
r'<p+\b[^>]+\bclass="article_date">([^<]+)<', webpage, 'upload date', default=None))
|
||||||
|
|
||||||
player_data['video'] = player_data.pop('token')
|
player_data['video'] = urllib.parse.unquote(player_data.pop('token'))
|
||||||
player_page = self._download_webpage(
|
player_page = self._download_webpage(
|
||||||
'https://player.mediaklikk.hu/playernew/player.php', video_id,
|
'https://player.mediaklikk.hu/playernew/player.php', video_id,
|
||||||
query=player_data, headers={'Referer': url})
|
query=player_data, headers={'Referer': url})
|
||||||
|
|||||||
@@ -1,15 +1,73 @@
|
|||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
clean_html,
|
||||||
|
determine_ext,
|
||||||
extract_attributes,
|
extract_attributes,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
mimetype2ext,
|
parse_resolution,
|
||||||
parse_iso8601,
|
str_or_none,
|
||||||
|
url_or_none,
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import find_elements, traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class MedialaanIE(InfoExtractor):
|
class MedialaanBaseIE(InfoExtractor):
|
||||||
|
def _extract_from_mychannels_api(self, mychannels_id):
|
||||||
|
webpage = self._download_webpage(
|
||||||
|
f'https://mychannels.video/embed/{mychannels_id}', mychannels_id)
|
||||||
|
brand_config = self._search_json(
|
||||||
|
r'window\.mychannels\.brand_config\s*=', webpage, 'brand config', mychannels_id)
|
||||||
|
response = self._download_json(
|
||||||
|
f'https://api.mychannels.world/v1/embed/video/{mychannels_id}',
|
||||||
|
mychannels_id, headers={'X-Mychannels-Brand': brand_config['brand']})
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for stream in traverse_obj(response, (
|
||||||
|
'streams', lambda _, v: url_or_none(v['url']),
|
||||||
|
)):
|
||||||
|
source_url = stream['url']
|
||||||
|
ext = determine_ext(source_url)
|
||||||
|
if ext == 'm3u8':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
source_url, mychannels_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||||
|
else:
|
||||||
|
format_id = traverse_obj(stream, ('quality', {str}))
|
||||||
|
formats.append({
|
||||||
|
'ext': ext,
|
||||||
|
'format_id': format_id,
|
||||||
|
'url': source_url,
|
||||||
|
**parse_resolution(format_id),
|
||||||
|
})
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': mychannels_id,
|
||||||
|
'formats': formats,
|
||||||
|
**traverse_obj(response, {
|
||||||
|
'title': ('title', {clean_html}),
|
||||||
|
'description': ('description', {clean_html}, filter),
|
||||||
|
'duration': ('durationMs', {int_or_none(scale=1000)}, {lambda x: x if x >= 0 else None}),
|
||||||
|
'genres': ('genre', 'title', {str}, filter, all, filter),
|
||||||
|
'is_live': ('live', {bool}),
|
||||||
|
'release_timestamp': ('publicationTimestampMs', {int_or_none(scale=1000)}),
|
||||||
|
'tags': ('tags', ..., 'title', {str}, filter, all, filter),
|
||||||
|
'thumbnail': ('image', 'baseUrl', {url_or_none}),
|
||||||
|
}),
|
||||||
|
**traverse_obj(response, ('channel', {
|
||||||
|
'channel': ('title', {clean_html}),
|
||||||
|
'channel_id': ('id', {str_or_none}),
|
||||||
|
})),
|
||||||
|
**traverse_obj(response, ('organisation', {
|
||||||
|
'uploader': ('title', {clean_html}),
|
||||||
|
'uploader_id': ('id', {str_or_none}),
|
||||||
|
})),
|
||||||
|
**traverse_obj(response, ('show', {
|
||||||
|
'series': ('title', {clean_html}),
|
||||||
|
'series_id': ('id', {str_or_none}),
|
||||||
|
})),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class MedialaanIE(MedialaanBaseIE):
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
https?://
|
https?://
|
||||||
(?:
|
(?:
|
||||||
@@ -32,7 +90,7 @@ class MedialaanIE(InfoExtractor):
|
|||||||
tubantia|
|
tubantia|
|
||||||
volkskrant
|
volkskrant
|
||||||
)\.nl
|
)\.nl
|
||||||
)/video/(?:[^/]+/)*[^/?&#]+~p
|
)/videos?/(?:[^/?#]+/)*[^/?&#]+(?:-|~p)
|
||||||
)
|
)
|
||||||
(?P<id>\d+)
|
(?P<id>\d+)
|
||||||
'''
|
'''
|
||||||
@@ -42,19 +100,83 @@ class MedialaanIE(InfoExtractor):
|
|||||||
'id': '193993',
|
'id': '193993',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'De terugkeer van Ally de Aap en wie vertrekt er nog bij NAC?',
|
'title': 'De terugkeer van Ally de Aap en wie vertrekt er nog bij NAC?',
|
||||||
'thumbnail': r're:https?://images\.mychannels\.video/imgix/.+',
|
'description': 'In een nieuwe Gegenpressing video bespreken Yadran Blanco en Dennis Kas het nieuws omrent NAC.',
|
||||||
'timestamp': 1611663540,
|
|
||||||
'upload_date': '20210126',
|
|
||||||
'duration': 238,
|
'duration': 238,
|
||||||
},
|
'channel': 'BN DeStem',
|
||||||
'params': {
|
'channel_id': '418',
|
||||||
'skip_download': True,
|
'genres': ['Sports'],
|
||||||
|
'release_date': '20210126',
|
||||||
|
'release_timestamp': 1611663540,
|
||||||
|
'series': 'Korte Reportage',
|
||||||
|
'series_id': '972',
|
||||||
|
'tags': 'count:2',
|
||||||
|
'thumbnail': r're:https?://images\.mychannels\.video/imgix/.+\.(?:jpe?g|png)',
|
||||||
|
'uploader': 'BN De Stem',
|
||||||
|
'uploader_id': '26',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.gelderlander.nl/video/kanalen/degelderlander~c320/series/snel-nieuws~s984/noodbevel-in-doetinchem-politie-stuurt-mensen-centrum-uit~p194093',
|
'url': 'https://www.gelderlander.nl/video/kanalen/degelderlander~c320/series/snel-nieuws~s984/noodbevel-in-doetinchem-politie-stuurt-mensen-centrum-uit~p194093',
|
||||||
'only_matching': True,
|
'info_dict': {
|
||||||
|
'id': '194093',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Noodbevel in Doetinchem: politie stuurt mensen centrum uit',
|
||||||
|
'description': 'md5:77e85b2cb26cfff9dc1fe2b1db524001',
|
||||||
|
'duration': 44,
|
||||||
|
'channel': 'De Gelderlander',
|
||||||
|
'channel_id': '320',
|
||||||
|
'genres': ['News'],
|
||||||
|
'release_date': '20210126',
|
||||||
|
'release_timestamp': 1611690600,
|
||||||
|
'series': 'Snel Nieuws',
|
||||||
|
'series_id': '984',
|
||||||
|
'tags': 'count:1',
|
||||||
|
'thumbnail': r're:https?://images\.mychannels\.video/imgix/.+\.(?:jpe?g|png)',
|
||||||
|
'uploader': 'De Gelderlander',
|
||||||
|
'uploader_id': '25',
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://embed.mychannels.video/sdk/production/193993?options=TFTFF_default',
|
'url': 'https://www.7sur7.be/videos/production/lla-tendance-tiktok-qui-enflamme-lespagne-707650',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '707650',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'La tendance TikTok qui enflamme l’Espagne',
|
||||||
|
'description': 'md5:c7ec4cb733190f227fc8935899f533b5',
|
||||||
|
'duration': 70,
|
||||||
|
'channel': 'Lifestyle',
|
||||||
|
'channel_id': '770',
|
||||||
|
'genres': ['Beauty & Lifestyle'],
|
||||||
|
'release_date': '20240906',
|
||||||
|
'release_timestamp': 1725617330,
|
||||||
|
'series': 'Lifestyle',
|
||||||
|
'series_id': '1848',
|
||||||
|
'tags': 'count:1',
|
||||||
|
'thumbnail': r're:https?://images\.mychannels\.video/imgix/.+\.(?:jpe?g|png)',
|
||||||
|
'uploader': '7sur7',
|
||||||
|
'uploader_id': '67',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://mychannels.video/embed/313117',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '313117',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': str,
|
||||||
|
'description': 'md5:255e2e52f6fe8a57103d06def438f016',
|
||||||
|
'channel': 'AD',
|
||||||
|
'channel_id': '238',
|
||||||
|
'genres': ['News'],
|
||||||
|
'live_status': 'is_live',
|
||||||
|
'release_date': '20241225',
|
||||||
|
'release_timestamp': 1735169425,
|
||||||
|
'series': 'Nieuws Update',
|
||||||
|
'series_id': '3337',
|
||||||
|
'tags': 'count:1',
|
||||||
|
'thumbnail': r're:https?://images\.mychannels\.video/imgix/.+\.(?:jpe?g|png)',
|
||||||
|
'uploader': 'AD',
|
||||||
|
'uploader_id': '1',
|
||||||
|
},
|
||||||
|
'params': {'skip_download': 'Livestream'},
|
||||||
|
}, {
|
||||||
|
'url': 'https://embed.mychannels.video/sdk/production/193993',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://embed.mychannels.video/script/production/193993',
|
'url': 'https://embed.mychannels.video/script/production/193993',
|
||||||
@@ -62,9 +184,6 @@ class MedialaanIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'https://embed.mychannels.video/production/193993',
|
'url': 'https://embed.mychannels.video/production/193993',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
|
||||||
'url': 'https://mychannels.video/embed/193993',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://embed.mychannels.video/embed/193993',
|
'url': 'https://embed.mychannels.video/embed/193993',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
@@ -75,51 +194,31 @@ class MedialaanIE(InfoExtractor):
|
|||||||
'id': '1576607',
|
'id': '1576607',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Tom Waes blaastest',
|
'title': 'Tom Waes blaastest',
|
||||||
|
'channel': 'De Morgen',
|
||||||
|
'channel_id': '352',
|
||||||
|
'description': 'Tom Waes werkt mee aan een alcoholcampagne op Werchter',
|
||||||
'duration': 62,
|
'duration': 62,
|
||||||
|
'genres': ['News'],
|
||||||
|
'release_date': '20250705',
|
||||||
|
'release_timestamp': 1751730795,
|
||||||
|
'series': 'Nieuwsvideo\'s',
|
||||||
|
'series_id': '1683',
|
||||||
|
'tags': 'count:1',
|
||||||
'thumbnail': r're:https?://video-images\.persgroep\.be/aws_generated.+\.jpg',
|
'thumbnail': r're:https?://video-images\.persgroep\.be/aws_generated.+\.jpg',
|
||||||
'timestamp': 1751730795,
|
'uploader': 'De Morgen',
|
||||||
'upload_date': '20250705',
|
'uploader_id': '17',
|
||||||
},
|
},
|
||||||
'params': {'extractor_args': {'generic': {'impersonate': ['chrome']}}},
|
'params': {'extractor_args': {'generic': {'impersonate': ['chrome']}}},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _extract_embed_urls(cls, url, webpage):
|
def _extract_embed_urls(cls, url, webpage):
|
||||||
entries = []
|
yield from traverse_obj(webpage, (
|
||||||
for element in re.findall(r'(<div[^>]+data-mychannels-type="video"[^>]*>)', webpage):
|
{find_elements(tag='div', attr='data-mychannels-type', value='video', html=True)},
|
||||||
mychannels_id = extract_attributes(element).get('data-mychannels-id')
|
..., {extract_attributes}, 'data-mychannels-id', {str}, filter,
|
||||||
if mychannels_id:
|
{lambda x: f'https://mychannels.video/embed/{x}'}))
|
||||||
entries.append('https://mychannels.video/embed/' + mychannels_id)
|
|
||||||
return entries
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
production_id = self._match_id(url)
|
mychannels_id = self._match_id(url)
|
||||||
production = self._download_json(
|
|
||||||
'https://embed.mychannels.video/sdk/production/' + production_id,
|
|
||||||
production_id, query={'options': 'UUUU_default'})['productions'][0]
|
|
||||||
title = production['title']
|
|
||||||
|
|
||||||
formats = []
|
return self._extract_from_mychannels_api(mychannels_id)
|
||||||
for source in (production.get('sources') or []):
|
|
||||||
src = source.get('src')
|
|
||||||
if not src:
|
|
||||||
continue
|
|
||||||
ext = mimetype2ext(source.get('type'))
|
|
||||||
if ext == 'm3u8':
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
src, production_id, 'mp4', 'm3u8_native',
|
|
||||||
m3u8_id='hls', fatal=False))
|
|
||||||
else:
|
|
||||||
formats.append({
|
|
||||||
'ext': ext,
|
|
||||||
'url': src,
|
|
||||||
})
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': production_id,
|
|
||||||
'title': title,
|
|
||||||
'formats': formats,
|
|
||||||
'thumbnail': production.get('posterUrl'),
|
|
||||||
'timestamp': parse_iso8601(production.get('publicationDate'), ' '),
|
|
||||||
'duration': int_or_none(production.get('duration')) or None,
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,102 +0,0 @@
|
|||||||
from .telecinco import TelecincoBaseIE
|
|
||||||
from ..utils import (
|
|
||||||
int_or_none,
|
|
||||||
parse_iso8601,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class MiTeleIE(TelecincoBaseIE):
|
|
||||||
IE_DESC = 'mitele.es'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'FhYW1iNTE6J6H7NkQRIEzfne6t2quqPg',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Diario de La redacción Programa 144',
|
|
||||||
'description': 'md5:07c35a7b11abb05876a6a79185b58d27',
|
|
||||||
'series': 'Diario de',
|
|
||||||
'season': 'Season 14',
|
|
||||||
'season_number': 14,
|
|
||||||
'episode': 'Tor, la web invisible',
|
|
||||||
'episode_number': 3,
|
|
||||||
'thumbnail': r're:(?i)^https?://.*\.jpg$',
|
|
||||||
'duration': 2913,
|
|
||||||
'age_limit': 16,
|
|
||||||
'timestamp': 1471209401,
|
|
||||||
'upload_date': '20160814',
|
|
||||||
},
|
|
||||||
'skip': 'HTTP Error 404 Not Found',
|
|
||||||
}, {
|
|
||||||
# no explicit title
|
|
||||||
'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'oyNG1iNTE6TAPP-JmCjbwfwJqqMMX3Vq',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Cuarto Milenio Temporada 6 Programa 226',
|
|
||||||
'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f',
|
|
||||||
'series': 'Cuarto Milenio',
|
|
||||||
'season': 'Season 6',
|
|
||||||
'season_number': 6,
|
|
||||||
'episode': 'Episode 24',
|
|
||||||
'episode_number': 24,
|
|
||||||
'thumbnail': r're:(?i)^https?://.*\.jpg$',
|
|
||||||
'duration': 7313,
|
|
||||||
'age_limit': 12,
|
|
||||||
'timestamp': 1471209021,
|
|
||||||
'upload_date': '20160814',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'HTTP Error 404 Not Found',
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.mitele.es/programas-tv/horizonte/temporada-5/programa-171-40_013480051/player/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '7adbe22e-cd41-4787-afa4-36f3da7c2c6f',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Horizonte Temporada 5 Programa 171',
|
|
||||||
'description': 'md5:97f1fb712c5ac27e5693a8b3c5c0c6e3',
|
|
||||||
'episode': 'Las Zonas de Bajas Emisiones, a debate',
|
|
||||||
'episode_number': 171,
|
|
||||||
'season': 'Season 5',
|
|
||||||
'season_number': 5,
|
|
||||||
'series': 'Horizonte',
|
|
||||||
'duration': 7012,
|
|
||||||
'upload_date': '20240927',
|
|
||||||
'timestamp': 1727416450,
|
|
||||||
'thumbnail': 'https://album.mediaset.es/eimg/2024/09/27/horizonte-171_9f02.jpg',
|
|
||||||
'age_limit': 12,
|
|
||||||
},
|
|
||||||
'params': {'geo_bypass_country': 'ES'},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144-40_1006364575251/player/',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
display_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
pre_player = self._search_json(
|
|
||||||
r'window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=',
|
|
||||||
webpage, 'Pre Player', display_id)['prePlayer']
|
|
||||||
title = pre_player['title']
|
|
||||||
video_info = self._parse_content(pre_player['video'], url)
|
|
||||||
content = pre_player.get('content') or {}
|
|
||||||
info = content.get('info') or {}
|
|
||||||
|
|
||||||
video_info.update({
|
|
||||||
'title': title,
|
|
||||||
'description': info.get('synopsis'),
|
|
||||||
'series': content.get('title'),
|
|
||||||
'season_number': int_or_none(info.get('season_number')),
|
|
||||||
'episode': content.get('subtitle'),
|
|
||||||
'episode_number': int_or_none(info.get('episode_number')),
|
|
||||||
'duration': int_or_none(info.get('duration')),
|
|
||||||
'age_limit': int_or_none(info.get('rating')),
|
|
||||||
'timestamp': parse_iso8601(pre_player.get('publishedTime')),
|
|
||||||
})
|
|
||||||
return video_info
|
|
||||||
@@ -1,652 +1,268 @@
|
|||||||
import re
|
import base64
|
||||||
import xml.etree.ElementTree
|
import json
|
||||||
|
import time
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..networking import HEADRequest, Request
|
from ..networking.exceptions import HTTPError
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
RegexNotFoundError,
|
|
||||||
find_xpath_attr,
|
|
||||||
fix_xml_ampersands,
|
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
join_nonempty,
|
js_to_json,
|
||||||
strip_or_none,
|
jwt_decode_hs256,
|
||||||
timeconvert,
|
parse_iso8601,
|
||||||
try_get,
|
parse_qs,
|
||||||
unescapeHTML,
|
update_url,
|
||||||
update_url_query,
|
update_url_query,
|
||||||
url_basename,
|
url_or_none,
|
||||||
xpath_text,
|
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import require, traverse_obj
|
||||||
|
|
||||||
|
|
||||||
def _media_xml_tag(tag):
|
class MTVServicesBaseIE(InfoExtractor):
|
||||||
return f'{{http://search.yahoo.com/mrss/}}{tag}'
|
_GEO_BYPASS = False
|
||||||
|
_GEO_COUNTRIES = ['US']
|
||||||
|
_CACHE_SECTION = 'mtvservices'
|
||||||
class MTVServicesInfoExtractor(InfoExtractor):
|
_ACCESS_TOKEN_KEY = 'access'
|
||||||
_MOBILE_TEMPLATE = None
|
_REFRESH_TOKEN_KEY = 'refresh'
|
||||||
_LANG = None
|
_MEDIA_TOKEN_KEY = 'media'
|
||||||
|
_token_cache = {}
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _id_from_uri(uri):
|
def _jwt_is_expired(token):
|
||||||
return uri.split(':')[-1]
|
return jwt_decode_hs256(token)['exp'] - time.time() < 120
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _remove_template_parameter(url):
|
def _get_auth_suite_data(config):
|
||||||
# Remove the templates, like &device={device}
|
return traverse_obj(config, {
|
||||||
return re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', url)
|
'clientId': ('clientId', {str}),
|
||||||
|
'countryCode': ('countryCode', {str}),
|
||||||
|
})
|
||||||
|
|
||||||
def _get_feed_url(self, uri, url=None):
|
def _call_auth_api(self, path, config, display_id=None, note=None, data=None, headers=None, query=None):
|
||||||
return self._FEED_URL
|
headers = {
|
||||||
|
'Accept': 'application/json',
|
||||||
def _get_thumbnail_url(self, uri, itemdoc):
|
'Client-Description': 'deviceName=Chrome Windows;deviceType=desktop;system=Windows NT 10.0',
|
||||||
search_path = '{}/{}'.format(_media_xml_tag('group'), _media_xml_tag('thumbnail'))
|
'Api-Version': '2025-07-09',
|
||||||
thumb_node = itemdoc.find(search_path)
|
**(headers or {}),
|
||||||
if thumb_node is None:
|
|
||||||
return None
|
|
||||||
return thumb_node.get('url') or thumb_node.text or None
|
|
||||||
|
|
||||||
def _extract_mobile_video_formats(self, mtvn_id):
|
|
||||||
webpage_url = self._MOBILE_TEMPLATE % mtvn_id
|
|
||||||
req = Request(webpage_url)
|
|
||||||
# Otherwise we get a webpage that would execute some javascript
|
|
||||||
req.headers['User-Agent'] = 'curl/7'
|
|
||||||
webpage = self._download_webpage(req, mtvn_id,
|
|
||||||
'Downloading mobile page')
|
|
||||||
metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
|
|
||||||
req = HEADRequest(metrics_url)
|
|
||||||
response = self._request_webpage(req, mtvn_id, 'Resolving url')
|
|
||||||
url = response.url
|
|
||||||
# Transform the url to get the best quality:
|
|
||||||
url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, count=1)
|
|
||||||
return [{'url': url, 'ext': 'mp4'}]
|
|
||||||
|
|
||||||
def _extract_video_formats(self, mdoc, mtvn_id, video_id):
|
|
||||||
if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
|
|
||||||
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
|
|
||||||
self.to_screen('The normal version is not available from your '
|
|
||||||
'country, trying with the mobile version')
|
|
||||||
return self._extract_mobile_video_formats(mtvn_id)
|
|
||||||
raise ExtractorError('This video is not available from your country.',
|
|
||||||
expected=True)
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
for rendition in mdoc.findall('.//rendition'):
|
|
||||||
if rendition.get('method') == 'hls':
|
|
||||||
hls_url = rendition.find('./src').text
|
|
||||||
formats.extend(self._extract_m3u8_formats(
|
|
||||||
hls_url, video_id, ext='mp4', entry_protocol='m3u8_native',
|
|
||||||
m3u8_id='hls', fatal=False))
|
|
||||||
else:
|
|
||||||
# fms
|
|
||||||
try:
|
|
||||||
_, _, ext = rendition.attrib['type'].partition('/')
|
|
||||||
rtmp_video_url = rendition.find('./src').text
|
|
||||||
if 'error_not_available.swf' in rtmp_video_url:
|
|
||||||
raise ExtractorError(
|
|
||||||
f'{self.IE_NAME} said: video is not available',
|
|
||||||
expected=True)
|
|
||||||
if rtmp_video_url.endswith('siteunavail.png'):
|
|
||||||
continue
|
|
||||||
formats.extend([{
|
|
||||||
'ext': 'flv' if rtmp_video_url.startswith('rtmp') else ext,
|
|
||||||
'url': rtmp_video_url,
|
|
||||||
'format_id': join_nonempty(
|
|
||||||
'rtmp' if rtmp_video_url.startswith('rtmp') else None,
|
|
||||||
rendition.get('bitrate')),
|
|
||||||
'width': int(rendition.get('width')),
|
|
||||||
'height': int(rendition.get('height')),
|
|
||||||
}])
|
|
||||||
except (KeyError, TypeError):
|
|
||||||
raise ExtractorError('Invalid rendition field.')
|
|
||||||
return formats
|
|
||||||
|
|
||||||
def _extract_subtitles(self, mdoc, mtvn_id):
|
|
||||||
subtitles = {}
|
|
||||||
for transcript in mdoc.findall('.//transcript'):
|
|
||||||
if transcript.get('kind') != 'captions':
|
|
||||||
continue
|
|
||||||
lang = transcript.get('srclang')
|
|
||||||
for typographic in transcript.findall('./typographic'):
|
|
||||||
sub_src = typographic.get('src')
|
|
||||||
if not sub_src:
|
|
||||||
continue
|
|
||||||
ext = typographic.get('format')
|
|
||||||
if ext == 'cea-608':
|
|
||||||
ext = 'scc'
|
|
||||||
subtitles.setdefault(lang, []).append({
|
|
||||||
'url': str(sub_src),
|
|
||||||
'ext': ext,
|
|
||||||
})
|
|
||||||
return subtitles
|
|
||||||
|
|
||||||
def _get_video_info(self, itemdoc, use_hls=True):
|
|
||||||
uri = itemdoc.find('guid').text
|
|
||||||
video_id = self._id_from_uri(uri)
|
|
||||||
self.report_extraction(video_id)
|
|
||||||
content_el = itemdoc.find('{}/{}'.format(_media_xml_tag('group'), _media_xml_tag('content')))
|
|
||||||
mediagen_url = self._remove_template_parameter(content_el.attrib['url'])
|
|
||||||
mediagen_url = mediagen_url.replace('device={device}', '')
|
|
||||||
if 'acceptMethods' not in mediagen_url:
|
|
||||||
mediagen_url += '&' if '?' in mediagen_url else '?'
|
|
||||||
mediagen_url += 'acceptMethods='
|
|
||||||
mediagen_url += 'hls' if use_hls else 'fms'
|
|
||||||
|
|
||||||
mediagen_doc = self._download_xml(
|
|
||||||
mediagen_url, video_id, 'Downloading video urls', fatal=False)
|
|
||||||
|
|
||||||
if not isinstance(mediagen_doc, xml.etree.ElementTree.Element):
|
|
||||||
return None
|
|
||||||
|
|
||||||
item = mediagen_doc.find('./video/item')
|
|
||||||
if item is not None and item.get('type') == 'text':
|
|
||||||
message = f'{self.IE_NAME} returned error: '
|
|
||||||
if item.get('code') is not None:
|
|
||||||
message += '{} - '.format(item.get('code'))
|
|
||||||
message += item.text
|
|
||||||
raise ExtractorError(message, expected=True)
|
|
||||||
|
|
||||||
description = strip_or_none(xpath_text(itemdoc, 'description'))
|
|
||||||
|
|
||||||
timestamp = timeconvert(xpath_text(itemdoc, 'pubDate'))
|
|
||||||
|
|
||||||
title_el = None
|
|
||||||
if title_el is None:
|
|
||||||
title_el = find_xpath_attr(
|
|
||||||
itemdoc, './/{http://search.yahoo.com/mrss/}category',
|
|
||||||
'scheme', 'urn:mtvn:video_title')
|
|
||||||
if title_el is None:
|
|
||||||
title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
|
|
||||||
if title_el is None:
|
|
||||||
title_el = itemdoc.find('.//title')
|
|
||||||
if title_el.text is None:
|
|
||||||
title_el = None
|
|
||||||
|
|
||||||
title = title_el.text
|
|
||||||
if title is None:
|
|
||||||
raise ExtractorError('Could not find video title')
|
|
||||||
title = title.strip()
|
|
||||||
|
|
||||||
series = find_xpath_attr(
|
|
||||||
itemdoc, './/{http://search.yahoo.com/mrss/}category',
|
|
||||||
'scheme', 'urn:mtvn:franchise')
|
|
||||||
season = find_xpath_attr(
|
|
||||||
itemdoc, './/{http://search.yahoo.com/mrss/}category',
|
|
||||||
'scheme', 'urn:mtvn:seasonN')
|
|
||||||
episode = find_xpath_attr(
|
|
||||||
itemdoc, './/{http://search.yahoo.com/mrss/}category',
|
|
||||||
'scheme', 'urn:mtvn:episodeN')
|
|
||||||
series = series.text if series is not None else None
|
|
||||||
season = season.text if season is not None else None
|
|
||||||
episode = episode.text if episode is not None else None
|
|
||||||
if season and episode:
|
|
||||||
# episode number includes season, so remove it
|
|
||||||
episode = re.sub(rf'^{season}', '', episode)
|
|
||||||
|
|
||||||
# This a short id that's used in the webpage urls
|
|
||||||
mtvn_id = None
|
|
||||||
mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
|
|
||||||
'scheme', 'urn:mtvn:id')
|
|
||||||
if mtvn_id_node is not None:
|
|
||||||
mtvn_id = mtvn_id_node.text
|
|
||||||
|
|
||||||
formats = self._extract_video_formats(mediagen_doc, mtvn_id, video_id)
|
|
||||||
|
|
||||||
# Some parts of complete video may be missing (e.g. missing Act 3 in
|
|
||||||
# http://www.southpark.de/alle-episoden/s14e01-sexual-healing)
|
|
||||||
if not formats:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return {
|
|
||||||
'title': title,
|
|
||||||
'formats': formats,
|
|
||||||
'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id),
|
|
||||||
'id': video_id,
|
|
||||||
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
|
|
||||||
'description': description,
|
|
||||||
'duration': float_or_none(content_el.attrib.get('duration')),
|
|
||||||
'timestamp': timestamp,
|
|
||||||
'series': series,
|
|
||||||
'season_number': int_or_none(season),
|
|
||||||
'episode_number': int_or_none(episode),
|
|
||||||
}
|
}
|
||||||
|
if data is not None:
|
||||||
|
headers['Content-Type'] = 'application/json'
|
||||||
|
if isinstance(data, dict):
|
||||||
|
data = json.dumps(data, separators=(',', ':')).encode()
|
||||||
|
|
||||||
def _get_feed_query(self, uri):
|
return self._download_json(
|
||||||
data = {'uri': uri}
|
f'https://auth.mtvnservices.com/{path}', display_id,
|
||||||
if self._LANG:
|
note=note or 'Calling authentication API', data=data,
|
||||||
data['lang'] = self._LANG
|
headers=headers, query={**self._get_auth_suite_data(config), **(query or {})})
|
||||||
return data
|
|
||||||
|
|
||||||
def _get_videos_info(self, uri, use_hls=True, url=None):
|
def _get_fresh_access_token(self, config, display_id=None, force_refresh=False):
|
||||||
video_id = self._id_from_uri(uri)
|
resource_id = config['resourceId']
|
||||||
feed_url = self._get_feed_url(uri, url)
|
# resource_id should already be in _token_cache since _get_media_token is the caller
|
||||||
info_url = update_url_query(feed_url, self._get_feed_query(uri))
|
tokens = self._token_cache[resource_id]
|
||||||
return self._get_videos_info_from_url(info_url, video_id, use_hls)
|
|
||||||
|
|
||||||
def _get_videos_info_from_url(self, url, video_id, use_hls=True):
|
access_token = tokens.get(self._ACCESS_TOKEN_KEY)
|
||||||
idoc = self._download_xml(
|
if not force_refresh and access_token and not self._jwt_is_expired(access_token):
|
||||||
url, video_id,
|
return access_token
|
||||||
'Downloading info', transform_source=fix_xml_ampersands)
|
|
||||||
|
|
||||||
title = xpath_text(idoc, './channel/title')
|
if self._REFRESH_TOKEN_KEY not in tokens:
|
||||||
description = xpath_text(idoc, './channel/description')
|
response = self._call_auth_api(
|
||||||
|
'accessToken', config, display_id, 'Retrieving auth tokens', data=b'')
|
||||||
|
else:
|
||||||
|
response = self._call_auth_api(
|
||||||
|
'accessToken/refresh', config, display_id, 'Refreshing auth tokens',
|
||||||
|
data={'refreshToken': tokens[self._REFRESH_TOKEN_KEY]},
|
||||||
|
headers={'Authorization': f'Bearer {access_token}'})
|
||||||
|
|
||||||
entries = []
|
tokens[self._ACCESS_TOKEN_KEY] = response['applicationAccessToken']
|
||||||
for item in idoc.findall('.//item'):
|
tokens[self._REFRESH_TOKEN_KEY] = response['deviceRefreshToken']
|
||||||
info = self._get_video_info(item, use_hls)
|
self.cache.store(self._CACHE_SECTION, resource_id, tokens)
|
||||||
if info:
|
|
||||||
entries.append(info)
|
|
||||||
|
|
||||||
# TODO: should be multi-video
|
return tokens[self._ACCESS_TOKEN_KEY]
|
||||||
return self.playlist_result(
|
|
||||||
entries, playlist_title=title, playlist_description=description)
|
|
||||||
|
|
||||||
def _extract_triforce_mgid(self, webpage, data_zone=None, video_id=None):
|
def _get_media_token(self, video_config, config, display_id=None):
|
||||||
triforce_feed = self._parse_json(self._search_regex(
|
resource_id = config['resourceId']
|
||||||
r'triforceManifestFeed\s*=\s*({.+?})\s*;\s*\n', webpage,
|
if resource_id in self._token_cache:
|
||||||
'triforce feed', default='{}'), video_id, fatal=False)
|
tokens = self._token_cache[resource_id]
|
||||||
|
else:
|
||||||
|
tokens = self._token_cache[resource_id] = self.cache.load(self._CACHE_SECTION, resource_id) or {}
|
||||||
|
|
||||||
data_zone = self._search_regex(
|
media_token = tokens.get(self._MEDIA_TOKEN_KEY)
|
||||||
r'data-zone=(["\'])(?P<zone>.+?_lc_promo.*?)\1', webpage,
|
if media_token and not self._jwt_is_expired(media_token):
|
||||||
'data zone', default=data_zone, group='zone')
|
return media_token
|
||||||
|
|
||||||
feed_url = try_get(
|
access_token = self._get_fresh_access_token(config, display_id)
|
||||||
triforce_feed, lambda x: x['manifest']['zones'][data_zone]['feed'],
|
if not jwt_decode_hs256(access_token).get('accessMethods'):
|
||||||
str)
|
# MTVServices uses a custom AdobePass oauth flow which is incompatible with AdobePassIE
|
||||||
if not feed_url:
|
mso_id = self.get_param('ap_mso')
|
||||||
return
|
if not mso_id:
|
||||||
|
raise ExtractorError(
|
||||||
|
'This video is only available for users of participating TV providers. '
|
||||||
|
'Use --ap-mso to specify Adobe Pass Multiple-system operator Identifier and pass '
|
||||||
|
'cookies from a browser session where you are signed-in to your provider.', expected=True)
|
||||||
|
|
||||||
feed = self._download_json(feed_url, video_id, fatal=False)
|
auth_suite_data = json.dumps(
|
||||||
if not feed:
|
self._get_auth_suite_data(config), separators=(',', ':')).encode()
|
||||||
return
|
callback_url = update_url_query(config['callbackURL'], {
|
||||||
|
'authSuiteData': urllib.parse.quote(base64.b64encode(auth_suite_data).decode()),
|
||||||
|
'mvpdCode': mso_id,
|
||||||
|
})
|
||||||
|
auth_url = self._call_auth_api(
|
||||||
|
f'mvpd/{mso_id}/login', config, display_id,
|
||||||
|
'Retrieving provider authentication URL',
|
||||||
|
query={'callbackUrl': callback_url},
|
||||||
|
headers={'Authorization': f'Bearer {access_token}'})['authenticationUrl']
|
||||||
|
res = self._download_webpage_handle(auth_url, display_id, 'Downloading provider auth page')
|
||||||
|
# XXX: The following "provider-specific code" likely only works if mso_id == Comcast_SSO
|
||||||
|
# BEGIN provider-specific code
|
||||||
|
redirect_url = self._search_json(
|
||||||
|
r'initInterstitialRedirect\(', res[0], 'redirect JSON',
|
||||||
|
display_id, transform_source=js_to_json)['continue']
|
||||||
|
urlh = self._request_webpage(redirect_url, display_id, 'Requesting provider redirect page')
|
||||||
|
authorization_code = parse_qs(urlh.url)['authorizationCode'][-1]
|
||||||
|
# END provider-specific code
|
||||||
|
self._call_auth_api(
|
||||||
|
f'access/mvpd/{mso_id}', config, display_id,
|
||||||
|
'Submitting authorization code to MTVNServices',
|
||||||
|
query={'authorizationCode': authorization_code}, data=b'',
|
||||||
|
headers={'Authorization': f'Bearer {access_token}'})
|
||||||
|
access_token = self._get_fresh_access_token(config, display_id, force_refresh=True)
|
||||||
|
|
||||||
return try_get(feed, lambda x: x['result']['data']['id'], str)
|
tokens[self._MEDIA_TOKEN_KEY] = self._call_auth_api(
|
||||||
|
'mediaToken', config, display_id, 'Fetching media token', data={
|
||||||
|
'content': {('id' if k == 'videoId' else k): v for k, v in video_config.items()},
|
||||||
|
'resourceId': resource_id,
|
||||||
|
}, headers={'Authorization': f'Bearer {access_token}'})['mediaToken']
|
||||||
|
|
||||||
@staticmethod
|
self.cache.store(self._CACHE_SECTION, resource_id, tokens)
|
||||||
def _extract_child_with_type(parent, t):
|
return tokens[self._MEDIA_TOKEN_KEY]
|
||||||
for c in parent['children']:
|
|
||||||
if c.get('type') == t:
|
def _real_extract(self, url):
|
||||||
return c
|
display_id = self._match_id(url)
|
||||||
|
|
||||||
def _extract_mgid(self, webpage):
|
|
||||||
try:
|
try:
|
||||||
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
|
data = self._download_json(
|
||||||
# or http://media.mtvnservices.com/{mgid}
|
update_url(url, query=None), display_id,
|
||||||
og_url = self._og_search_video_url(webpage)
|
query={'json': 'true'})
|
||||||
mgid = url_basename(og_url)
|
except ExtractorError as e:
|
||||||
if mgid.endswith('.swf'):
|
if isinstance(e.cause, HTTPError) and e.cause.status == 404 and not self.suitable(e.cause.response.url):
|
||||||
mgid = mgid[:-4]
|
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
|
||||||
except RegexNotFoundError:
|
raise
|
||||||
mgid = None
|
|
||||||
|
|
||||||
if mgid is None or ':' not in mgid:
|
flex_wrapper = traverse_obj(data, (
|
||||||
mgid = self._search_regex(
|
'children', lambda _, v: v['type'] == 'MainContainer',
|
||||||
[r'data-mgid="(.*?)"', r'swfobject\.embedSWF\(".*?(mgid:.*?)"'],
|
(None, ('children', lambda _, v: v['type'] == 'AviaWrapper')),
|
||||||
webpage, 'mgid', default=None)
|
'children', lambda _, v: v['type'] == 'FlexWrapper', {dict}, any))
|
||||||
|
video_detail = traverse_obj(flex_wrapper, (
|
||||||
|
(None, ('children', lambda _, v: v['type'] == 'AuthSuiteWrapper')),
|
||||||
|
'children', lambda _, v: v['type'] == 'Player',
|
||||||
|
'props', 'videoDetail', {dict}, any))
|
||||||
|
if not video_detail:
|
||||||
|
video_detail = traverse_obj(data, (
|
||||||
|
'children', ..., ('handleTVEAuthRedirection', None),
|
||||||
|
'videoDetail', {dict}, any, {require('video detail')}))
|
||||||
|
|
||||||
if not mgid:
|
mgid = video_detail['mgid']
|
||||||
sm4_embed = self._html_search_meta(
|
video_id = mgid.rpartition(':')[2]
|
||||||
'sm4:video:embed', webpage, 'sm4 embed', default='')
|
service_url = traverse_obj(video_detail, ('videoServiceUrl', {url_or_none}, {update_url(query=None)}))
|
||||||
mgid = self._search_regex(
|
if not service_url:
|
||||||
r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid', default=None)
|
raise ExtractorError('This content is no longer available', expected=True)
|
||||||
|
|
||||||
if not mgid:
|
headers = {}
|
||||||
mgid = self._extract_triforce_mgid(webpage)
|
if video_detail.get('authRequired'):
|
||||||
|
# The vast majority of provider-locked content has been moved to Paramount+
|
||||||
|
# BetIE is the only extractor that is currently known to reach this code path
|
||||||
|
video_config = traverse_obj(flex_wrapper, (
|
||||||
|
'children', lambda _, v: v['type'] == 'AuthSuiteWrapper',
|
||||||
|
'props', 'videoConfig', {dict}, any, {require('video config')}))
|
||||||
|
config = traverse_obj(data, (
|
||||||
|
'props', 'authSuiteConfig', {dict}, {require('auth suite config')}))
|
||||||
|
headers['X-VIA-TVE-MEDIATOKEN'] = self._get_media_token(video_config, config, display_id)
|
||||||
|
|
||||||
if not mgid:
|
stream_info = self._download_json(
|
||||||
data = self._parse_json(self._search_regex(
|
service_url, video_id, 'Downloading API JSON', 'Unable to download API JSON',
|
||||||
r'__DATA__\s*=\s*({.+?});', webpage, 'data'), None)
|
query={'clientPlatform': 'desktop'}, headers=headers)['stitchedstream']
|
||||||
main_container = self._extract_child_with_type(data, 'MainContainer')
|
|
||||||
ab_testing = self._extract_child_with_type(main_container, 'ABTesting')
|
|
||||||
video_player = self._extract_child_with_type(ab_testing or main_container, 'VideoPlayer')
|
|
||||||
if video_player:
|
|
||||||
mgid = try_get(video_player, lambda x: x['props']['media']['video']['config']['uri'])
|
|
||||||
else:
|
|
||||||
flex_wrapper = self._extract_child_with_type(ab_testing or main_container, 'FlexWrapper')
|
|
||||||
auth_suite_wrapper = self._extract_child_with_type(flex_wrapper, 'AuthSuiteWrapper')
|
|
||||||
player = self._extract_child_with_type(auth_suite_wrapper or flex_wrapper, 'Player')
|
|
||||||
if player:
|
|
||||||
mgid = try_get(player, lambda x: x['props']['videoDetail']['mgid'])
|
|
||||||
|
|
||||||
if not mgid:
|
manifest_type = stream_info['manifesttype']
|
||||||
raise ExtractorError('Could not extract mgid')
|
if manifest_type == 'hls':
|
||||||
|
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
|
||||||
|
stream_info['source'], video_id, 'mp4', m3u8_id=manifest_type)
|
||||||
|
elif manifest_type == 'dash':
|
||||||
|
formats, subtitles = self._extract_mpd_formats_and_subtitles(
|
||||||
|
stream_info['source'], video_id, mpd_id=manifest_type)
|
||||||
|
else:
|
||||||
|
self.raise_no_formats(f'Unsupported manifest type "{manifest_type}"')
|
||||||
|
formats, subtitles = [], {}
|
||||||
|
|
||||||
return mgid
|
return {
|
||||||
|
**traverse_obj(video_detail, {
|
||||||
def _real_extract(self, url):
|
'title': ('title', {str}),
|
||||||
title = url_basename(url)
|
'channel': ('channel', 'name', {str}),
|
||||||
webpage = self._download_webpage(url, title)
|
'thumbnails': ('images', ..., {'url': ('url', {url_or_none})}),
|
||||||
mgid = self._extract_mgid(webpage)
|
'description': (('fullDescription', 'description'), {str}, any),
|
||||||
return self._get_videos_info(mgid, url=url)
|
'series': ('parentEntity', 'title', {str}),
|
||||||
|
'season_number': ('seasonNumber', {int_or_none}),
|
||||||
|
'episode_number': ('episodeAiringOrder', {int_or_none}),
|
||||||
|
'duration': ('duration', 'milliseconds', {float_or_none(scale=1000)}),
|
||||||
|
'timestamp': ((
|
||||||
|
('originalPublishDate', {parse_iso8601}),
|
||||||
|
('publishDate', 'timestamp', {int_or_none})), any),
|
||||||
|
'release_timestamp': ((
|
||||||
|
('originalAirDate', {parse_iso8601}),
|
||||||
|
('airDate', 'timestamp', {int_or_none})), any),
|
||||||
|
}),
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
|
class MTVIE(MTVServicesBaseIE):
|
||||||
IE_NAME = 'mtvservices:embedded'
|
|
||||||
_VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'
|
|
||||||
_EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media\.mtvnservices\.com/embed/.+?)\1']
|
|
||||||
|
|
||||||
_TEST = {
|
|
||||||
# From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
|
|
||||||
'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
|
|
||||||
'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1043906',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
|
|
||||||
'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
|
|
||||||
'timestamp': 1400126400,
|
|
||||||
'upload_date': '20140515',
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
def _get_feed_url(self, uri, url=None):
|
|
||||||
video_id = self._id_from_uri(uri)
|
|
||||||
config = self._download_json(
|
|
||||||
f'http://media.mtvnservices.com/pmt/e1/access/index.html?uri={uri}&configtype=edge', video_id)
|
|
||||||
return self._remove_template_parameter(config['feedWithQueryParams'])
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
mobj = self._match_valid_url(url)
|
|
||||||
mgid = mobj.group('mgid')
|
|
||||||
return self._get_videos_info(mgid)
|
|
||||||
|
|
||||||
|
|
||||||
class MTVIE(MTVServicesInfoExtractor):
|
|
||||||
IE_NAME = 'mtv'
|
IE_NAME = 'mtv'
|
||||||
_VALID_URL = r'https?://(?:www\.)?mtv\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'
|
_VALID_URL = r'https?://(?:www\.)?mtv\.com/(?:video-clips|episodes)/(?P<id>[\da-z]{6})'
|
||||||
_FEED_URL = 'http://www.mtv.com/feeds/mrss/'
|
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.mtv.com/video-clips/vl8qof/unlocking-the-truth-trailer',
|
'url': 'https://www.mtv.com/video-clips/syolsj',
|
||||||
'md5': '1edbcdf1e7628e414a8c5dcebca3d32b',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5e14040d-18a4-47c4-a582-43ff602de88e',
|
'id': '213ea7f8-bac7-4a43-8cd5-8d8cb8c8160f',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Unlocking The Truth|July 18, 2016|1|101|Trailer',
|
'display_id': 'syolsj',
|
||||||
'description': '"Unlocking the Truth" premieres August 17th at 11/10c.',
|
'title': 'The Challenge: Vets & New Threats',
|
||||||
'timestamp': 1468846800,
|
'description': 'md5:c4d2e90a5fff6463740fbf96b2bb6a41',
|
||||||
'upload_date': '20160718',
|
'duration': 95.0,
|
||||||
|
'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref',
|
||||||
|
'series': 'The Challenge',
|
||||||
|
'season': 'Season 41',
|
||||||
|
'season_number': 41,
|
||||||
|
'episode': 'Episode 0',
|
||||||
|
'episode_number': 0,
|
||||||
|
'timestamp': 1753945200,
|
||||||
|
'upload_date': '20250731',
|
||||||
|
'release_timestamp': 1753945200,
|
||||||
|
'release_date': '20250731',
|
||||||
},
|
},
|
||||||
|
'params': {'skip_download': 'm3u8'},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.mtv.com/full-episodes/94tujl/unlocking-the-truth-gates-of-hell-season-1-ep-101',
|
'url': 'https://www.mtv.com/episodes/uzvigh',
|
||||||
'only_matching': True,
|
'info_dict': {
|
||||||
}, {
|
'id': '364e8b9e-e415-11ef-b405-16fff45bc035',
|
||||||
'url': 'http://www.mtv.com/episodes/g8xu7q/teen-mom-2-breaking-the-wall-season-7-ep-713',
|
'ext': 'mp4',
|
||||||
'only_matching': True,
|
'display_id': 'uzvigh',
|
||||||
|
'title': 'CT Tamburello and Johnny Bananas',
|
||||||
|
'description': 'md5:364cea52001e9c13f92784e3365c6606',
|
||||||
|
'channel': 'MTV',
|
||||||
|
'duration': 1260.0,
|
||||||
|
'thumbnail': r're:https://images\.paramount\.tech/uri/mgid:arc:imageassetref',
|
||||||
|
'series': 'Ridiculousness',
|
||||||
|
'season': 'Season 47',
|
||||||
|
'season_number': 47,
|
||||||
|
'episode': 'Episode 19',
|
||||||
|
'episode_number': 19,
|
||||||
|
'timestamp': 1753318800,
|
||||||
|
'upload_date': '20250724',
|
||||||
|
'release_timestamp': 1753318800,
|
||||||
|
'release_date': '20250724',
|
||||||
|
},
|
||||||
|
'params': {'skip_download': 'm3u8'},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
||||||
class MTVJapanIE(MTVServicesInfoExtractor):
|
|
||||||
IE_NAME = 'mtvjapan'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?mtvjapan\.com/videos/(?P<id>[0-9a-z]+)'
|
|
||||||
|
|
||||||
_TEST = {
|
|
||||||
'url': 'http://www.mtvjapan.com/videos/prayht/fresh-info-cadillac-escalade',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'bc01da03-6fe5-4284-8880-f291f4e368f5',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': '【Fresh Info】Cadillac ESCALADE Sport Edition',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_GEO_COUNTRIES = ['JP']
|
|
||||||
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
|
|
||||||
|
|
||||||
def _get_feed_query(self, uri):
|
|
||||||
return {
|
|
||||||
'arcEp': 'mtvjapan.com',
|
|
||||||
'mgid': uri,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class MTVVideoIE(MTVServicesInfoExtractor):
|
|
||||||
IE_NAME = 'mtv:video'
|
|
||||||
_VALID_URL = r'''(?x)^https?://
|
|
||||||
(?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
|
|
||||||
m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''
|
|
||||||
|
|
||||||
_FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'
|
|
||||||
|
|
||||||
_TESTS = [
|
|
||||||
{
|
|
||||||
'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml',
|
|
||||||
'md5': '850f3f143316b1e71fa56a4edfd6e0f8',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '853555',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Taylor Swift - "Ours (VH1 Storytellers)"',
|
|
||||||
'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.',
|
|
||||||
'timestamp': 1352610000,
|
|
||||||
'upload_date': '20121111',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
]
|
|
||||||
|
|
||||||
def _get_thumbnail_url(self, uri, itemdoc):
|
|
||||||
return 'http://mtv.mtvnimages.com/uri/' + uri
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
mobj = self._match_valid_url(url)
|
|
||||||
video_id = mobj.group('videoid')
|
|
||||||
uri = mobj.groupdict().get('mgid')
|
|
||||||
if uri is None:
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
|
|
||||||
# Some videos come from Vevo.com
|
|
||||||
m_vevo = re.search(
|
|
||||||
r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage)
|
|
||||||
if m_vevo:
|
|
||||||
vevo_id = m_vevo.group(1)
|
|
||||||
self.to_screen(f'Vevo video detected: {vevo_id}')
|
|
||||||
return self.url_result(f'vevo:{vevo_id}', ie='Vevo')
|
|
||||||
|
|
||||||
uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
|
|
||||||
return self._get_videos_info(uri)
|
|
||||||
|
|
||||||
|
|
||||||
class MTVDEIE(MTVServicesInfoExtractor):
|
|
||||||
_WORKING = False
|
|
||||||
IE_NAME = 'mtv.de'
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:musik/videoclips|folgen|news)/(?P<id>[0-9a-z]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.mtv.de/musik/videoclips/2gpnv7/Traum',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'd5d472bc-f5b7-11e5-bffd-a4badb20dab5',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Traum',
|
|
||||||
'description': 'Traum',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Blocked at Travis CI',
|
|
||||||
}, {
|
|
||||||
# mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
|
|
||||||
'url': 'http://www.mtv.de/folgen/6b1ylu/teen-mom-2-enthuellungen-S5-F1',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1e5a878b-31c5-11e7-a442-0e40cf2fc285',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Teen Mom 2',
|
|
||||||
'description': 'md5:dc65e357ef7e1085ed53e9e9d83146a7',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Blocked at Travis CI',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.mtv.de/news/glolix/77491-mtv-movies-spotlight--pixels--teil-3',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'local_playlist-4e760566473c4c8c5344',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
|
|
||||||
'description': 'MTV Movies Supercut',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Das Video kann zur Zeit nicht abgespielt werden.',
|
|
||||||
}]
|
|
||||||
_GEO_COUNTRIES = ['DE']
|
|
||||||
_FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'
|
|
||||||
|
|
||||||
def _get_feed_query(self, uri):
|
|
||||||
return {
|
|
||||||
'arcEp': 'mtv.de',
|
|
||||||
'mgid': uri,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class MTVItaliaIE(MTVServicesInfoExtractor):
    """Extractor for single mtv.it episode, video and music pages."""
    IE_NAME = 'mtv.it'
    _VALID_URL = r'https?://(?:www\.)?mtv\.it/(?:episodi|video|musica)/(?P<id>[0-9a-z]+)'
    _TESTS = [{
        'url': 'http://www.mtv.it/episodi/24bqab/mario-una-serie-di-maccio-capatonda-cavoli-amario-episodio-completo-S1-E1',
        'info_dict': {
            'id': '0f0fc78e-45fc-4cce-8f24-971c25477530',
            'ext': 'mp4',
            'title': 'Cavoli amario (episodio completo)',
            'description': 'md5:4962bccea8fed5b7c03b295ae1340660',
            'series': 'Mario - Una Serie Di Maccio Capatonda',
            'season_number': 1,
            'episode_number': 1,
        },
        'params': {
            'skip_download': True,
        },
    }]
    _GEO_COUNTRIES = ['IT']
    _FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed'

    def _get_feed_query(self, uri):
        """Return the query parameters for the intl MRSS feed request.

        `arcEp` selects the site endpoint; `mgid` identifies the media item.
        """
        feed_query = {'arcEp': 'mtv.it'}
        feed_query['mgid'] = uri
        return feed_query
|
|
||||||
|
|
||||||
|
|
||||||
class MTVItaliaProgrammaIE(MTVItaliaIE):  # XXX: Do not subclass from concrete IE
    """Playlist extractor for mtv.it program and playlist pages.

    Resolves the page through the 'triforce' manifest feed, picks the
    appropriate entries feed from the manifest's zone modules, and yields one
    url_result per item via :meth:`_get_entries`.
    """
    IE_NAME = 'mtv.it:programma'
    _VALID_URL = r'https?://(?:www\.)?mtv\.it/(?:programmi|playlist)/(?P<id>[0-9a-z]+)'
    _TESTS = [{
        # program page: general
        'url': 'http://www.mtv.it/programmi/s2rppv/mario-una-serie-di-maccio-capatonda',
        'info_dict': {
            'id': 'a6f155bc-8220-4640-aa43-9b95f64ffa3d',
            'title': 'Mario - Una Serie Di Maccio Capatonda',
            'description': 'md5:72fbffe1f77ccf4e90757dd4e3216153',
        },
        'playlist_count': 2,
        'params': {
            'skip_download': True,
        },
    }, {
        # program page: specific season
        'url': 'http://www.mtv.it/programmi/d9ncjf/mario-una-serie-di-maccio-capatonda-S2',
        'info_dict': {
            'id': '4deeb5d8-f272-490c-bde2-ff8d261c6dd1',
            'title': 'Mario - Una Serie Di Maccio Capatonda - Stagione 2',
        },
        'playlist_count': 34,
        'params': {
            'skip_download': True,
        },
    }, {
        # playlist page + redirect
        'url': 'http://www.mtv.it/playlist/sexy-videos/ilctal',
        'info_dict': {
            'id': 'dee8f9ee-756d-493b-bf37-16d1d2783359',
            'title': 'Sexy Videos',
        },
        'playlist_mincount': 145,
        'params': {
            'skip_download': True,
        },
    }]
    _GEO_COUNTRIES = ['IT']
    # Triforce manifest endpoint; queried with ?url=<page url> in _real_extract.
    _FEED_URL = 'http://www.mtv.it/feeds/triforce/manifest/v8'

    def _get_entries(self, title, url):
        """Yield url_results for every item of a paginated entries feed.

        Follows result.nextPageURL until the feed reports no further page.
        Entries may live under result.data.items or result.data.seasons.
        """
        while True:
            # Page number is the trailing path component; default to '1'
            # for the first request (used only for the download note).
            pg = self._search_regex(r'/(\d+)$', url, 'entries', '1')
            entries = self._download_json(url, title, f'page {pg}')
            # Advance to the next page before reusing `entries` below;
            # a missing/None nextPageURL terminates the loop at the bottom.
            url = try_get(
                entries, lambda x: x['result']['nextPageURL'], str)
            entries = try_get(
                entries, (
                    lambda x: x['result']['data']['items'],
                    lambda x: x['result']['data']['seasons']),
                list)
            for entry in entries or []:
                # Only items with a canonical page URL are usable entries.
                if entry.get('canonicalURL'):
                    yield self.url_result(entry['canonicalURL'])
            if not url:
                break

    def _real_extract(self, url):
        query = {'url': url}
        # Ask the triforce manifest endpoint to describe this page.
        info_url = update_url_query(self._FEED_URL, query)
        video_id = self._match_id(url)
        info = self._download_json(info_url, video_id).get('manifest')

        # Manifest may point at a relocated page; hand off to the generic
        # resolution machinery instead of extracting here.
        redirect = try_get(
            info, lambda x: x['newLocation']['url'], str)
        if redirect:
            return self.url_result(redirect)

        title = info.get('title')
        # Prefer the reporting item id over the short slug from the URL.
        video_id = try_get(
            info, lambda x: x['reporting']['itemId'], str)
        parent_id = try_get(
            info, lambda x: x['reporting']['parentId'], str)

        # Scan the manifest's zone modules for the feeds we need.
        # NOTE(review): module-name meanings below are inferred from usage —
        # M304/M209 appear to carry show info, M308/M317 the full playlist,
        # M300 the current-season feed; confirm against the triforce API.
        playlist_url = current_url = None
        for z in (info.get('zones') or {}).values():
            if z.get('moduleName') in ('INTL_M304', 'INTL_M209'):
                # Overwrites the manifest URL with the show-info feed.
                info_url = z.get('feed')
            if z.get('moduleName') in ('INTL_M308', 'INTL_M317'):
                # First matching zone wins.
                playlist_url = playlist_url or z.get('feed')
            if z.get('moduleName') in ('INTL_M300',):
                current_url = current_url or z.get('feed')

        if not info_url:
            raise ExtractorError('No info found')

        if video_id == parent_id:
            # Item id equals its parent (generic program page): derive a more
            # specific id from the second-to-last path component of the feed URL.
            video_id = self._search_regex(
                r'([^\/]+)/[^\/]+$', info_url, 'video_id')

        info = self._download_json(info_url, video_id, 'Show infos')
        info = try_get(info, lambda x: x['result']['data'], dict)
        title = title or try_get(
            info, (
                lambda x: x['title'],
                lambda x: x['headline']),
            str)
        description = try_get(info, lambda x: x['content'], str)

        if current_url:
            # Season metadata comes from the playlist feed, not current_url.
            season = try_get(
                self._download_json(playlist_url, video_id, 'Seasons info'),
                lambda x: x['result']['data'], dict)
            current = try_get(
                season, lambda x: x['currentSeason'], str)
            seasons = try_get(
                season, lambda x: x['seasons'], list) or []

            # If the page targets a season listed in the feed, switch to the
            # season-specific (current) feed for entry enumeration.
            if current in [s.get('eTitle') for s in seasons]:
                playlist_url = current_url

        # Strip site-branding suffixes like "- MTV Italia" / "| playlist".
        title = re.sub(
            r'[-|]\s*(?:mtv\s*italia|programma|playlist)',
            '', title, flags=re.IGNORECASE).strip()

        return self.playlist_result(
            self._get_entries(title, playlist_url),
            video_id, title, description)
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user