Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2026-01-17 12:21:52 +00:00)
Compare commits: 2022.07.18 ... 2023.01.02 (575 commits)
.github/ISSUE_TEMPLATE/1_broken_site.yml (vendored): 17 changes
@@ -2,6 +2,13 @@ name: Broken site
 description: Report broken or misfunctioning site
 labels: [triage, site-bug]
 body:
+  - type: checkboxes
+    attributes:
+      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
+      description: Fill all fields even if you think it is irrelevant for the issue
+      options:
+        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
+          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -11,11 +18,11 @@ body:
       options:
         - label: I'm reporting a broken site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2023.01.02** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
-        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
+        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -55,7 +62,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2023.01.02 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -63,8 +70,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.07.18, Current version: 2022.07.18
-        yt-dlp is up to date (2022.07.18)
+        Latest version: 2023.01.02, Current version: 2023.01.02
+        yt-dlp is up to date (2023.01.02)
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/2_site_support_request.yml (vendored)

@@ -2,6 +2,13 @@ name: Site support request
 description: Request support for a new site
 labels: [triage, site-request]
 body:
+  - type: checkboxes
+    attributes:
+      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
+      description: Fill all fields even if you think it is irrelevant for the issue
+      options:
+        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
+          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -11,11 +18,11 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2023.01.02** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
-        - label: I've checked that none of provided URLs [violate any copyrights](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) or contain any [DRM](https://en.wikipedia.org/wiki/Digital_rights_management) to the best of my knowledge
+        - label: I've checked that none of provided URLs [violate any copyrights](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy) or contain any [DRM](https://en.wikipedia.org/wiki/Digital_rights_management) to the best of my knowledge
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -67,7 +74,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2023.01.02 [9d339c4] (win32_exe)
        [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -75,8 +82,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.07.18, Current version: 2022.07.18
-        yt-dlp is up to date (2022.07.18)
+        Latest version: 2023.01.02, Current version: 2023.01.02
+        yt-dlp is up to date (2023.01.02)
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/3_site_feature_request.yml (vendored)

@@ -2,6 +2,13 @@ name: Site feature request
 description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
+  - type: checkboxes
+    attributes:
+      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
+      description: Fill all fields even if you think it is irrelevant for the issue
+      options:
+        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
+          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -11,7 +18,7 @@ body:
       options:
         - label: I'm requesting a site-specific feature
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2023.01.02** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -63,7 +70,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2023.01.02 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -71,8 +78,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.07.18, Current version: 2022.07.18
-        yt-dlp is up to date (2022.07.18)
+        Latest version: 2023.01.02, Current version: 2023.01.02
+        yt-dlp is up to date (2023.01.02)
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/4_bug_report.yml (vendored): 19 changes
@@ -2,6 +2,13 @@ name: Bug report
 description: Report a bug unrelated to any particular site or extractor
 labels: [triage, bug]
 body:
+  - type: checkboxes
+    attributes:
+      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
+      description: Fill all fields even if you think it is irrelevant for the issue
+      options:
+        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
+          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -11,11 +18,11 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2023.01.02** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
-        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
+        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -29,7 +36,7 @@ body:
       placeholder: Provide any additional information, any suggested solutions, and as much context and examples as possible
     validations:
       required: true
   - type: checkboxes
     id: verbose
     attributes:
       label: Provide verbose output that clearly demonstrates the problem
@@ -48,7 +55,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2023.01.02 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -56,8 +63,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.07.18, Current version: 2022.07.18
-        yt-dlp is up to date (2022.07.18)
+        Latest version: 2023.01.02, Current version: 2023.01.02
+        yt-dlp is up to date (2023.01.02)
         <more lines>
       render: shell
     validations:
.github/ISSUE_TEMPLATE/5_feature_request.yml (vendored): 15 changes
@@ -2,6 +2,13 @@ name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
+  - type: checkboxes
+    attributes:
+      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
+      description: Fill all fields even if you think it is irrelevant for the issue
+      options:
+        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
+          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -13,7 +20,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2023.01.02** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -44,7 +51,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2023.01.02 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -52,7 +59,7 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.07.18, Current version: 2022.07.18
-        yt-dlp is up to date (2022.07.18)
+        Latest version: 2023.01.02, Current version: 2023.01.02
+        yt-dlp is up to date (2023.01.02)
         <more lines>
       render: shell
.github/ISSUE_TEMPLATE/6_question.yml (vendored): 17 changes
@@ -2,12 +2,19 @@ name: Ask question
 description: Ask yt-dlp related question
 labels: [question]
 body:
+  - type: checkboxes
+    attributes:
+      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
+      description: Fill all fields even if you think it is irrelevant for the issue
+      options:
+        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
+          required: true
   - type: markdown
     attributes:
       value: |
         ### Make sure you are **only** asking a question and not reporting a bug or requesting a feature.
         If your question contains "isn't working" or "can you add", this is most likely the wrong template.
-        If you are in doubt whether this is the right template, **use another template**!
+        If you are in doubt whether this is the right template, **USE ANOTHER TEMPLATE**!
   - type: checkboxes
     id: checklist
     attributes:
@@ -19,7 +26,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2023.01.02** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
           required: true
@@ -50,7 +57,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2023.01.02 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -58,7 +65,7 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.07.18, Current version: 2022.07.18
-        yt-dlp is up to date (2022.07.18)
+        Latest version: 2023.01.02, Current version: 2023.01.02
+        yt-dlp is up to date (2023.01.02)
         <more lines>
       render: shell
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml (vendored)

@@ -2,6 +2,7 @@ name: Broken site
 description: Report broken or misfunctioning site
 labels: [triage, site-bug]
 body:
+  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:
@@ -15,7 +16,7 @@ body:
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
-        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
+        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml (vendored)

@@ -2,6 +2,7 @@ name: Site support request
 description: Request support for a new site
 labels: [triage, site-request]
 body:
+  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:
@@ -15,7 +16,7 @@ body:
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
-        - label: I've checked that none of provided URLs [violate any copyrights](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) or contain any [DRM](https://en.wikipedia.org/wiki/Digital_rights_management) to the best of my knowledge
+        - label: I've checked that none of provided URLs [violate any copyrights](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy) or contain any [DRM](https://en.wikipedia.org/wiki/Digital_rights_management) to the best of my knowledge
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml (vendored)

@@ -2,6 +2,7 @@ name: Site feature request
 description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
+  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml (vendored): 5 changes
@@ -2,6 +2,7 @@ name: Bug report
 description: Report a bug unrelated to any particular site or extractor
 labels: [triage, bug]
 body:
+  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:
@@ -15,7 +16,7 @@ body:
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
-        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
+        - label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -29,4 +30,4 @@ body:
       placeholder: Provide any additional information, any suggested solutions, and as much context and examples as possible
     validations:
       required: true
 %(verbose)s
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml (vendored)

@@ -2,6 +2,7 @@ name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
+  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:
.github/ISSUE_TEMPLATE_tmpl/6_question.yml (vendored): 3 changes
@@ -2,12 +2,13 @@ name: Ask question
 description: Ask yt-dlp related question
 labels: [question]
 body:
+  %(no_skip)s
   - type: markdown
     attributes:
       value: |
         ### Make sure you are **only** asking a question and not reporting a bug or requesting a feature.
         If your question contains "isn't working" or "can you add", this is most likely the wrong template.
-        If you are in doubt whether this is the right template, **use another template**!
+        If you are in doubt whether this is the right template, **USE ANOTHER TEMPLATE**!
   - type: checkboxes
     id: checklist
     attributes:
.github/PULL_REQUEST_TEMPLATE.md (vendored): 27 changes
@@ -1,3 +1,18 @@
+**IMPORTANT**: PRs without the template will be CLOSED
+
+### Description of your *pull request* and other information
+
+<!--
+
+Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
+
+-->
+
+ADD DESCRIPTION HERE
+
+Fixes #
+
+
 <details open><summary>Template</summary> <!-- OPEN is intentional -->
 
 <!--
@@ -25,16 +40,4 @@
 - [ ] Core bug fix/improvement
 - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
 
-### Description of your *pull request* and other information
-
 </details>
-
-<!--
-
-Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-
--->
-
-DESCRIPTION
-
-Fixes #
.github/workflows/build.yml (vendored): 485 changes
@@ -1,19 +1,22 @@
 name: Build
 on: workflow_dispatch
+permissions:
+  contents: read
+
 jobs:
-  create_release:
+  prepare:
+    permissions:
+      contents: write # for push_release
     runs-on: ubuntu-latest
     outputs:
       version_suffix: ${{ steps.version_suffix.outputs.version_suffix }}
       ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
-      upload_url: ${{ steps.create_release.outputs.upload_url }}
-      release_id: ${{ steps.create_release.outputs.id }}
+      head_sha: ${{ steps.push_release.outputs.head_sha }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v4
         with:
           python-version: '3.10'
 
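The new top-level `permissions: contents: read` block pins the workflow's default `GITHUB_TOKEN` to read-only, and only the renamed `prepare` job (formerly `create_release`) elevates itself back to `contents: write` so it can push the release commit. A minimal sketch of the same least-privilege pattern, with illustrative job and step names rather than the ones in this workflow:

```yaml
name: Example
on: workflow_dispatch

permissions:
  contents: read  # default token scope for every job below

jobs:
  release:
    permissions:
      contents: write  # this job alone may push commits
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - run: git push origin HEAD:release  # would fail under the read-only default
```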
@@ -22,7 +25,7 @@ jobs:
         env:
           PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
         if: "env.PUSH_VERSION_COMMIT == ''"
-        run: echo ::set-output name=version_suffix::$(date -u +"%H%M%S")
+        run: echo "version_suffix=$(date -u +"%H%M%S")" >> "$GITHUB_OUTPUT"
       - name: Bump version
         id: bump_version
         run: |
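This `run:` change reflects GitHub's October 2022 deprecation of the `::set-output` workflow command in favor of appending `name=value` lines to the file named by `$GITHUB_OUTPUT`. A minimal sketch contrasting the two forms (the `vars` step id and `build_time` output name are hypothetical):

```yaml
steps:
  - id: vars
    run: |
      # deprecated form: a workflow command printed to stdout
      #   echo "::set-output name=build_time::$(date -u +%H%M%S)"
      # current form: append a name=value line to the $GITHUB_OUTPUT file
      echo "build_time=$(date -u +%H%M%S)" >> "$GITHUB_OUTPUT"
  - run: echo "Built at ${{ steps.vars.outputs.build_time }}"
```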
@@ -37,125 +40,65 @@ jobs:
           git add -u
           git commit -m "[version] update" -m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all :ci run dl"
           git push origin --force ${{ github.event.ref }}:release
-          echo ::set-output name=head_sha::$(git rev-parse HEAD)
+          echo "head_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
       - name: Update master
         env:
           PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
         if: "env.PUSH_VERSION_COMMIT != ''"
         run: git push origin ${{ github.event.ref }}
-      - name: Get Changelog
-        run: |
-          changelog=$(grep -oPz '(?s)(?<=### ${{ steps.bump_version.outputs.ytdlp_version }}\n{2}).+?(?=\n{2,3}###)' Changelog.md) || true
-          echo "changelog<<EOF" >> $GITHUB_ENV
-          echo "$changelog" >> $GITHUB_ENV
-          echo "EOF" >> $GITHUB_ENV
-
-      - name: Create Release
-        id: create_release
-        uses: actions/create-release@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
-          release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
-          commitish: ${{ steps.push_release.outputs.head_sha }}
-          draft: true
-          prerelease: false
-          body: |
-            #### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
-
-            ---
-            <details open><summary><h3>Changelog</summary>
-            <p>
-
-            ${{ env.changelog }}
-
-            </p>
-            </details>
-
-
   build_unix:
-    needs: create_release
-    runs-on: ubuntu-18.04  # Standalone executable should be built on minimum supported OS
-    outputs:
-      sha256_bin: ${{ steps.get_sha.outputs.sha256_bin }}
-      sha512_bin: ${{ steps.get_sha.outputs.sha512_bin }}
-      sha256_tar: ${{ steps.get_sha.outputs.sha256_tar }}
-      sha512_tar: ${{ steps.get_sha.outputs.sha512_tar }}
-      sha256_linux: ${{ steps.get_sha.outputs.sha256_linux }}
-      sha512_linux: ${{ steps.get_sha.outputs.sha512_linux }}
-      sha256_linux_zip: ${{ steps.get_sha.outputs.sha256_linux_zip }}
-      sha512_linux_zip: ${{ steps.get_sha.outputs.sha512_linux_zip }}
-
+    needs: prepare
+    runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
           python-version: '3.10'
+      - uses: conda-incubator/setup-miniconda@v2
+        with:
+          miniforge-variant: Mambaforge
+          use-mamba: true
+          channels: conda-forge
+          auto-update-conda: true
+          activate-environment: ''
+          auto-activate-base: false
       - name: Install Requirements
         run: |
-          sudo apt-get -y install zip pandoc man
-          python -m pip install --upgrade pip setuptools wheel twine
-          python -m pip install Pyinstaller -r requirements.txt
+          sudo apt-get -y install zip pandoc man sed
+          python -m pip install -U pip setuptools wheel twine
+          python -m pip install -U Pyinstaller -r requirements.txt
+          reqs=$(mktemp)
+          echo -e 'python=3.10.*\npyinstaller' >$reqs
+          sed 's/^brotli.*/brotli-python/' <requirements.txt >>$reqs
+          mamba create -n build --file $reqs
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
+          python devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
           python devscripts/make_lazy_extractors.py
-      - name: Build Unix executables
+      - name: Build Unix platform-independent binary
         run: |
           make all tar
+      - name: Build Unix standalone binary
+        shell: bash -l {0}
+        run: |
+          unset LD_LIBRARY_PATH  # Harmful; set by setup-python
+          conda activate build
           python pyinst.py --onedir
           (cd ./dist/yt-dlp_linux && zip -r ../yt-dlp_linux.zip .)
           python pyinst.py
-      - name: Get SHA2-SUMS
-        id: get_sha
-        run: |
-          echo "::set-output name=sha256_bin::$(sha256sum yt-dlp | awk '{print $1}')"
-          echo "::set-output name=sha512_bin::$(sha512sum yt-dlp | awk '{print $1}')"
-          echo "::set-output name=sha256_tar::$(sha256sum yt-dlp.tar.gz | awk '{print $1}')"
-          echo "::set-output name=sha512_tar::$(sha512sum yt-dlp.tar.gz | awk '{print $1}')"
-          echo "::set-output name=sha256_linux::$(sha256sum dist/yt-dlp_linux | awk '{print $1}')"
-          echo "::set-output name=sha512_linux::$(sha512sum dist/yt-dlp_linux | awk '{print $1}')"
-          echo "::set-output name=sha256_linux_zip::$(sha256sum dist/yt-dlp_linux.zip | awk '{print $1}')"
-          echo "::set-output name=sha512_linux_zip::$(sha512sum dist/yt-dlp_linux.zip | awk '{print $1}')"
-
-      - name: Upload zip binary
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v3
         with:
-          upload_url: ${{ needs.create_release.outputs.upload_url }}
-          asset_path: ./yt-dlp
-          asset_name: yt-dlp
-          asset_content_type: application/octet-stream
-      - name: Upload Source tar
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.create_release.outputs.upload_url }}
-          asset_path: ./yt-dlp.tar.gz
-          asset_name: yt-dlp.tar.gz
-          asset_content_type: application/gzip
-      - name: Upload standalone binary
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.create_release.outputs.upload_url }}
-          asset_path: ./dist/yt-dlp_linux
-          asset_name: yt-dlp_linux
-          asset_content_type: application/octet-stream
-      - name: Upload onedir binary
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.create_release.outputs.upload_url }}
-          asset_path: ./dist/yt-dlp_linux.zip
-          asset_name: yt-dlp_linux.zip
-          asset_content_type: application/zip
+          path: |
+            yt-dlp
+            yt-dlp.tar.gz
+            dist/yt-dlp_linux
+            dist/yt-dlp_linux.zip
 
       - name: Build and publish on PyPi
         env:
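With `actions/create-release` and the per-file `upload-release-asset` steps removed, each build job now stages its outputs as workflow artifacts, and the SHA checksum steps disappear along with the draft release. Presumably a later job (not part of the excerpt shown here) collects the artifacts and publishes the release; a minimal sketch of that consumer side, with a hypothetical `publish_release` job name:

```yaml
  publish_release:
    needs: [prepare, build_unix]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v3
        with:
          path: artifact  # unnamed artifacts from all build jobs land here
      - run: ls -lR artifact/  # stand-in for the actual release-publishing step
```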
@@ -164,6 +107,7 @@ jobs:
         if: "env.TWINE_PASSWORD != ''"
         run: |
           rm -rf dist/*
+          python devscripts/set-variant.py pip -M "You installed yt-dlp with pip or using the wheel from PyPi; Use that to update"
           python setup.py sdist bdist_wheel
           twine upload dist/*
 
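The `if: "env.TWINE_PASSWORD != ''"` guard here (and the matching `BREW_TOKEN` guard below) maps the secret into `env:` first because the `secrets` context is not available inside a step's `if:` expression; testing the environment variable instead lets forks without the secret skip the step cleanly. The pattern in isolation (the `PYPI_TOKEN` secret name is illustrative):

```yaml
      - name: Publish to PyPI
        env:
          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}  # illustrative secret name
        if: "env.TWINE_PASSWORD != ''"  # skipped when the secret is unset
        run: twine upload dist/*
```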
@@ -180,24 +124,62 @@ jobs:
         if: "env.BREW_TOKEN != ''"
         run: |
           git clone git@github.com:yt-dlp/homebrew-taps taps/
-          python devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ needs.create_release.outputs.ytdlp_version }}"
+          python devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ needs.prepare.outputs.ytdlp_version }}"
           git -C taps/ config user.name github-actions
           git -C taps/ config user.email github-actions@example.com
-          git -C taps/ commit -am 'yt-dlp: ${{ needs.create_release.outputs.ytdlp_version }}'
+          git -C taps/ commit -am 'yt-dlp: ${{ needs.prepare.outputs.ytdlp_version }}'
           git -C taps/ push
 
+
+  build_linux_arm:
+    permissions:
+      packages: write # for Creating cache
+    runs-on: ubuntu-latest
+    needs: prepare
+    strategy:
+      matrix:
+        architecture:
+          - armv7
+          - aarch64
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          path: ./repo
+      - name: Virtualized Install, Prepare & Build
+        uses: yt-dlp/run-on-arch-action@v2
+        with:
+          githubToken: ${{ github.token }} # To cache image
+          arch: ${{ matrix.architecture }}
+          distro: ubuntu18.04 # Standalone executable should be built on minimum supported OS
+          dockerRunArgs: --volume "${PWD}/repo:/repo"
+          install: | # Installing Python 3.10 from the Deadsnakes repo raises errors
+            apt update
+            apt -y install zlib1g-dev python3.8 python3.8-dev python3.8-distutils python3-pip
+            python3.8 -m pip install -U pip setuptools wheel
+            # Cannot access requirements.txt from the repo directory at this stage
+            python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi
+
+          run: |
+            cd repo
+            python3.8 -m pip install -U Pyinstaller -r requirements.txt # Cached version may be out of date
+            python3.8 devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
+            python3.8 devscripts/make_lazy_extractors.py
+            python3.8 pyinst.py
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v3
+        with:
+          path: | # run-on-arch-action designates armv7l as armv7
+            repo/dist/yt-dlp_linux_${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}
+
+
   build_macos:
     runs-on: macos-11
-    needs: create_release
-    outputs:
-      sha256_macos: ${{ steps.get_sha.outputs.sha256_macos }}
-      sha512_macos: ${{ steps.get_sha.outputs.sha512_macos }}
-      sha256_macos_zip: ${{ steps.get_sha.outputs.sha256_macos_zip }}
-      sha512_macos_zip: ${{ steps.get_sha.outputs.sha512_macos_zip }}
-
+    needs: prepare
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # NB: In order to create a universal2 application, the version of python3 in /usr/bin has to be used
       - name: Install Requirements
         run: |
@@ -206,50 +188,28 @@ jobs:
|
|||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
/usr/bin/python3 devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
|
/usr/bin/python3 devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
|
||||||
/usr/bin/python3 devscripts/make_lazy_extractors.py
|
/usr/bin/python3 devscripts/make_lazy_extractors.py
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
/usr/bin/python3 pyinst.py --target-architecture universal2 --onedir
|
/usr/bin/python3 pyinst.py --target-architecture universal2 --onedir
|
||||||
(cd ./dist/yt-dlp_macos && zip -r ../yt-dlp_macos.zip .)
|
(cd ./dist/yt-dlp_macos && zip -r ../yt-dlp_macos.zip .)
|
||||||
/usr/bin/python3 pyinst.py --target-architecture universal2
|
/usr/bin/python3 pyinst.py --target-architecture universal2
|
||||||
- name: Get SHA2-SUMS
|
|
||||||
id: get_sha
|
|
||||||
run: |
|
|
||||||
echo "::set-output name=sha256_macos::$(sha256sum dist/yt-dlp_macos | awk '{print $1}')"
|
|
||||||
echo "::set-output name=sha512_macos::$(sha512sum dist/yt-dlp_macos | awk '{print $1}')"
|
|
||||||
echo "::set-output name=sha256_macos_zip::$(sha256sum dist/yt-dlp_macos.zip | awk '{print $1}')"
|
|
||||||
echo "::set-output name=sha512_macos_zip::$(sha512sum dist/yt-dlp_macos.zip | awk '{print $1}')"
|
|
||||||
|
|
||||||
- name: Upload standalone binary
|
- name: Upload artifacts
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-artifact@v3
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
path: |
|
||||||
asset_path: ./dist/yt-dlp_macos
|
dist/yt-dlp_macos
|
||||||
asset_name: yt-dlp_macos
|
dist/yt-dlp_macos.zip
|
||||||
asset_content_type: application/octet-stream
|
|
||||||
- name: Upload onedir binary
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./dist/yt-dlp_macos.zip
|
|
||||||
asset_name: yt-dlp_macos.zip
|
|
||||||
asset_content_type: application/zip
|
|
||||||
|
|
||||||
|
|
||||||
build_macos_legacy:
|
build_macos_legacy:
|
||||||
runs-on: macos-latest
|
runs-on: macos-latest
|
||||||
needs: create_release
|
needs: prepare
|
||||||
outputs:
|
|
||||||
sha256_macos_legacy: ${{ steps.get_sha.outputs.sha256_macos_legacy }}
|
|
||||||
sha512_macos_legacy: ${{ steps.get_sha.outputs.sha512_macos_legacy }}
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
- name: Install Python
|
- name: Install Python
|
||||||
# We need the official Python, because the GA ones only support newer macOS versions
|
# We need the official Python, because the GA ones only support newer macOS versions
|
||||||
env:
|
env:
|
||||||
@@ -269,52 +229,37 @@ jobs:
|
|||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
python3 devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
|
python3 devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
|
||||||
python3 devscripts/make_lazy_extractors.py
|
python3 devscripts/make_lazy_extractors.py
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
python3 pyinst.py
|
python3 pyinst.py
|
||||||
- name: Get SHA2-SUMS
|
mv dist/yt-dlp_macos dist/yt-dlp_macos_legacy
|
||||||
id: get_sha
|
|
||||||
run: |
|
|
||||||
echo "::set-output name=sha256_macos_legacy::$(sha256sum dist/yt-dlp_macos | awk '{print $1}')"
|
|
||||||
echo "::set-output name=sha512_macos_legacy::$(sha512sum dist/yt-dlp_macos | awk '{print $1}')"
|
|
||||||
|
|
||||||
- name: Upload standalone binary
|
- name: Upload artifacts
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-artifact@v3
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
path: |
|
||||||
asset_path: ./dist/yt-dlp_macos
|
dist/yt-dlp_macos_legacy
|
||||||
asset_name: yt-dlp_macos_legacy
|
|
||||||
asset_content_type: application/octet-stream
|
|
||||||
|
|
||||||
|
|
||||||
build_windows:
|
build_windows:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
needs: create_release
|
needs: prepare
|
||||||
outputs:
|
|
||||||
sha256_win: ${{ steps.get_sha.outputs.sha256_win }}
|
|
||||||
sha512_win: ${{ steps.get_sha.outputs.sha512_win }}
|
|
||||||
sha256_py2exe: ${{ steps.get_sha.outputs.sha256_py2exe }}
|
|
||||||
sha512_py2exe: ${{ steps.get_sha.outputs.sha512_py2exe }}
|
|
||||||
sha256_win_zip: ${{ steps.get_sha.outputs.sha256_win_zip }}
|
|
||||||
sha512_win_zip: ${{ steps.get_sha.outputs.sha512_win_zip }}
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
- uses: actions/setup-python@v2
|
- uses: actions/setup-python@v4
|
||||||
with: # 3.8 is used for Win7 support
|
with: # 3.8 is used for Win7 support
|
||||||
python-version: '3.8'
|
python-version: '3.8'
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: | # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
|
run: | # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
|
||||||
python -m pip install --upgrade pip setuptools wheel py2exe
|
python -m pip install -U pip setuptools wheel py2exe
|
||||||
pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.10-py3-none-any.whl" -r requirements.txt
|
pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-5.3-py3-none-any.whl" -r requirements.txt
|
||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
|
python devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
@@ -323,154 +268,126 @@ jobs:
|
|||||||
python pyinst.py
|
python pyinst.py
|
||||||
python pyinst.py --onedir
|
python pyinst.py --onedir
|
||||||
Compress-Archive -Path ./dist/yt-dlp/* -DestinationPath ./dist/yt-dlp_win.zip
|
Compress-Archive -Path ./dist/yt-dlp/* -DestinationPath ./dist/yt-dlp_win.zip
|
||||||
- name: Get SHA2-SUMS
|
|
||||||
id: get_sha
|
|
||||||
run: |
|
|
||||||
echo "::set-output name=sha256_py2exe::$((Get-FileHash dist\yt-dlp_min.exe -Algorithm SHA256).Hash.ToLower())"
|
|
||||||
echo "::set-output name=sha512_py2exe::$((Get-FileHash dist\yt-dlp_min.exe -Algorithm SHA512).Hash.ToLower())"
|
|
||||||
echo "::set-output name=sha256_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
|
|
||||||
echo "::set-output name=sha512_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
|
|
||||||
echo "::set-output name=sha256_win_zip::$((Get-FileHash dist\yt-dlp_win.zip -Algorithm SHA256).Hash.ToLower())"
|
|
||||||
echo "::set-output name=sha512_win_zip::$((Get-FileHash dist\yt-dlp_win.zip -Algorithm SHA512).Hash.ToLower())"
|
|
||||||
|
|
||||||
- name: Upload py2exe binary
|
- name: Upload artifacts
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-artifact@v3
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
path: |
|
||||||
asset_path: ./dist/yt-dlp_min.exe
|
dist/yt-dlp.exe
|
||||||
asset_name: yt-dlp_min.exe
|
dist/yt-dlp_min.exe
|
||||||
asset_content_type: application/vnd.microsoft.portable-executable
|
dist/yt-dlp_win.zip
|
||||||
- name: Upload standalone binary
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./dist/yt-dlp.exe
|
|
||||||
asset_name: yt-dlp.exe
|
|
||||||
asset_content_type: application/vnd.microsoft.portable-executable
|
|
||||||
- name: Upload onedir binary
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./dist/yt-dlp_win.zip
|
|
||||||
asset_name: yt-dlp_win.zip
|
|
||||||
asset_content_type: application/zip
|
|
||||||
|
|
||||||
|
|
||||||
build_windows32:
|
build_windows32:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
needs: create_release
|
needs: prepare
|
||||||
outputs:
|
|
||||||
sha256_win32: ${{ steps.get_sha.outputs.sha256_win32 }}
|
|
||||||
sha512_win32: ${{ steps.get_sha.outputs.sha512_win32 }}
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
- uses: actions/setup-python@v2
|
- uses: actions/setup-python@v4
|
||||||
with: # 3.7 is used for Vista support. See https://github.com/yt-dlp/yt-dlp/issues/390
|
with: # 3.7 is used for Vista support. See https://github.com/yt-dlp/yt-dlp/issues/390
|
||||||
python-version: '3.7'
|
python-version: '3.7'
|
||||||
architecture: 'x86'
|
architecture: 'x86'
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip setuptools wheel
|
python -m pip install -U pip setuptools wheel
|
||||||
pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.10-py3-none-any.whl" -r requirements.txt
|
pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-5.3-py3-none-any.whl" -r requirements.txt
|
||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
|
python devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
python pyinst.py
|
python pyinst.py
|
||||||
- name: Get SHA2-SUMS
|
|
||||||
id: get_sha
|
|
||||||
run: |
|
|
||||||
echo "::set-output name=sha256_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA256).Hash.ToLower())"
|
|
||||||
echo "::set-output name=sha512_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA512).Hash.ToLower())"
|
|
||||||
|
|
||||||
- name: Upload standalone binary
|
- name: Upload artifacts
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-artifact@v3
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
with:
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
path: |
|
||||||
asset_path: ./dist/yt-dlp_x86.exe
|
dist/yt-dlp_x86.exe
|
||||||
asset_name: yt-dlp_x86.exe
|
|
||||||
asset_content_type: application/vnd.microsoft.portable-executable
|
|
||||||
|
|
||||||
|
|
||||||
finish:
|
publish_release:
|
||||||
|
permissions:
|
||||||
|
contents: write # for action-gh-release
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: [create_release, build_unix, build_windows, build_windows32, build_macos, build_macos_legacy]
|
needs: [prepare, build_unix, build_linux_arm, build_windows, build_windows32, build_macos, build_macos_legacy]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Make SHA2-SUMS files
|
- uses: actions/checkout@v3
|
||||||
|
- uses: actions/download-artifact@v3
|
||||||
|
|
||||||
|
- name: Get Changelog
|
||||||
run: |
|
run: |
|
||||||
echo "${{ needs.build_unix.outputs.sha256_bin }} yt-dlp" >> SHA2-256SUMS
|
changelog=$(grep -oPz '(?s)(?<=### ${{ needs.prepare.outputs.ytdlp_version }}\n{2}).+?(?=\n{2,3}###)' Changelog.md) || true
|
||||||
echo "${{ needs.build_unix.outputs.sha256_tar }} yt-dlp.tar.gz" >> SHA2-256SUMS
|
echo "changelog<<EOF" >> $GITHUB_ENV
|
||||||
echo "${{ needs.build_unix.outputs.sha256_linux }} yt-dlp_linux" >> SHA2-256SUMS
|
echo "$changelog" >> $GITHUB_ENV
|
||||||
echo "${{ needs.build_unix.outputs.sha256_linux_zip }} yt-dlp_linux.zip" >> SHA2-256SUMS
|
echo "EOF" >> $GITHUB_ENV
|
||||||
echo "${{ needs.build_windows.outputs.sha256_win }} yt-dlp.exe" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_windows.outputs.sha256_py2exe }} yt-dlp_min.exe" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_windows32.outputs.sha256_win32 }} yt-dlp_x86.exe" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_windows.outputs.sha256_win_zip }} yt-dlp_win.zip" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_macos.outputs.sha256_macos }} yt-dlp_macos" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_macos.outputs.sha256_macos_zip }} yt-dlp_macos.zip" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_macos_legacy.outputs.sha256_macos_legacy }} yt-dlp_macos_legacy" >> SHA2-256SUMS
|
|
||||||
echo "${{ needs.build_unix.outputs.sha512_bin }} yt-dlp" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_unix.outputs.sha512_tar }} yt-dlp.tar.gz" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_unix.outputs.sha512_linux }} yt-dlp_linux" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_unix.outputs.sha512_linux_zip }} yt-dlp_linux.zip" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_windows.outputs.sha512_win }} yt-dlp.exe" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_windows.outputs.sha512_py2exe }} yt-dlp_min.exe" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_windows32.outputs.sha512_win32 }} yt-dlp_x86.exe" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_windows.outputs.sha512_win_zip }} yt-dlp_win.zip" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_macos.outputs.sha512_macos }} yt-dlp_macos" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_macos.outputs.sha512_macos_zip }} yt-dlp_macos.zip" >> SHA2-512SUMS
|
|
||||||
echo "${{ needs.build_macos_legacy.outputs.sha512_macos_legacy }} yt-dlp_macos_legacy" >> SHA2-512SUMS
|
|
||||||
|
|
||||||
- name: Upload SHA2-256SUMS file
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./SHA2-256SUMS
|
|
||||||
asset_name: SHA2-256SUMS
|
|
||||||
asset_content_type: text/plain
|
|
||||||
- name: Upload SHA2-512SUMS file
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./SHA2-512SUMS
|
|
||||||
asset_name: SHA2-512SUMS
|
|
||||||
asset_content_type: text/plain
|
|
||||||
|
|
||||||
- name: Make Update spec
|
- name: Make Update spec
|
||||||
run: |
|
run: |
|
||||||
echo "# This file is used for regulating self-update" >> _update_spec
|
echo "# This file is used for regulating self-update" >> _update_spec
|
||||||
echo "lock 2022.07.18 .+ Python 3.6" >> _update_spec
|
echo "lock 2022.07.18 .+ Python 3.6" >> _update_spec
|
||||||
- name: Upload update spec
|
- name: Make SHA2-SUMS files
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ needs.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./_update_spec
|
|
||||||
asset_name: _update_spec
|
|
||||||
asset_content_type: text/plain
|
|
||||||
|
|
||||||
- name: Finalize release
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
run: |
|
run: |
|
||||||
gh api -X PATCH -H "Accept: application/vnd.github.v3+json" \
|
sha256sum artifact/yt-dlp | awk '{print $1 " yt-dlp"}' >> SHA2-256SUMS
|
||||||
/repos/${{ github.repository }}/releases/${{ needs.create_release.outputs.release_id }} \
|
sha256sum artifact/yt-dlp.tar.gz | awk '{print $1 " yt-dlp.tar.gz"}' >> SHA2-256SUMS
|
||||||
-F draft=false
|
sha256sum artifact/yt-dlp.exe | awk '{print $1 " yt-dlp.exe"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_win.zip | awk '{print $1 " yt-dlp_win.zip"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_min.exe | awk '{print $1 " yt-dlp_min.exe"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_x86.exe | awk '{print $1 " yt-dlp_x86.exe"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_macos | awk '{print $1 " yt-dlp_macos"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_macos.zip | awk '{print $1 " yt-dlp_macos.zip"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_macos_legacy | awk '{print $1 " yt-dlp_macos_legacy"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_linux_armv7l | awk '{print $1 " yt-dlp_linux_armv7l"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/yt-dlp_linux_aarch64 | awk '{print $1 " yt-dlp_linux_aarch64"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/dist/yt-dlp_linux | awk '{print $1 " yt-dlp_linux"}' >> SHA2-256SUMS
|
||||||
|
sha256sum artifact/dist/yt-dlp_linux.zip | awk '{print $1 " yt-dlp_linux.zip"}' >> SHA2-256SUMS
|
||||||
|
sha512sum artifact/yt-dlp | awk '{print $1 " yt-dlp"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp.tar.gz | awk '{print $1 " yt-dlp.tar.gz"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp.exe | awk '{print $1 " yt-dlp.exe"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_win.zip | awk '{print $1 " yt-dlp_win.zip"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_min.exe | awk '{print $1 " yt-dlp_min.exe"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_x86.exe | awk '{print $1 " yt-dlp_x86.exe"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_macos | awk '{print $1 " yt-dlp_macos"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_macos.zip | awk '{print $1 " yt-dlp_macos.zip"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_macos_legacy | awk '{print $1 " yt-dlp_macos_legacy"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_linux_armv7l | awk '{print $1 " yt-dlp_linux_armv7l"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/yt-dlp_linux_aarch64 | awk '{print $1 " yt-dlp_linux_aarch64"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/dist/yt-dlp_linux | awk '{print $1 " yt-dlp_linux"}' >> SHA2-512SUMS
|
||||||
|
sha512sum artifact/dist/yt-dlp_linux.zip | awk '{print $1 " yt-dlp_linux.zip"}' >> SHA2-512SUMS
|
||||||
|
|
||||||
|
- name: Publish Release
|
||||||
|
uses: yt-dlp/action-gh-release@v1
|
||||||
|
with:
|
||||||
|
tag_name: ${{ needs.prepare.outputs.ytdlp_version }}
|
||||||
|
name: yt-dlp ${{ needs.prepare.outputs.ytdlp_version }}
|
||||||
|
target_commitish: ${{ needs.prepare.outputs.head_sha }}
|
||||||
|
body: |
|
||||||
|
#### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
|
||||||
|
|
||||||
|
---
|
||||||
|
<details open><summary><h3>Changelog</summary>
|
||||||
|
<p>
|
||||||
|
|
||||||
|
${{ env.changelog }}
|
||||||
|
|
||||||
|
</p>
|
||||||
|
</details>
|
||||||
|
files: |
|
||||||
|
SHA2-256SUMS
|
||||||
|
SHA2-512SUMS
|
||||||
|
artifact/yt-dlp
|
||||||
|
artifact/yt-dlp.tar.gz
|
||||||
|
artifact/yt-dlp.exe
|
||||||
|
artifact/yt-dlp_win.zip
|
||||||
|
artifact/yt-dlp_min.exe
|
||||||
|
artifact/yt-dlp_x86.exe
|
||||||
|
artifact/yt-dlp_macos
|
||||||
|
artifact/yt-dlp_macos.zip
|
||||||
|
artifact/yt-dlp_macos_legacy
|
||||||
|
artifact/yt-dlp_linux_armv7l
|
||||||
|
artifact/yt-dlp_linux_aarch64
|
||||||
|
artifact/dist/yt-dlp_linux
|
||||||
|
artifact/dist/yt-dlp_linux.zip
|
||||||
|
_update_spec
|
||||||
|
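A note on the `Get Changelog` step above: the `grep -oPz` pattern pulls the section between the release's `### <version>` heading and the next `###` heading out of `Changelog.md`. The following Python sketch approximates the same extraction purely for illustration; the version string is hypothetical and the real workflow does this in shell:

```python
import re

# Rough Python equivalent of the workflow's grep -oPz extraction:
# capture everything between "### <version>" and the next "###" heading.
version = '2023.01.02'  # hypothetical release tag
changelog_md = open('Changelog.md', encoding='utf-8').read()
match = re.search(
    rf'(?s)(?<=### {re.escape(version)}\n\n).+?(?=\n{{2,3}}###)', changelog_md)
changelog = match.group(0) if match else ''
```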
18 .github/workflows/core.yml vendored

@@ -1,5 +1,8 @@
 name: Core Tests
 on: [push, pull_request]
+permissions:
+  contents: read
+
 jobs:
   tests:
     name: Core Tests
@@ -9,26 +12,27 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest]
-        # CPython 3.9 is in quick-test
-        python-version: ['3.6', '3.7', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8]
+        # CPython 3.11 is in quick-test
+        python-version: ['3.8', '3.9', '3.10', pypy-3.7, pypy-3.8]
         run-tests-ext: [sh]
         include:
         # atleast one of each CPython/PyPy tests must be in windows
        - os: windows-latest
-          python-version: '3.8'
+          python-version: '3.7'
          run-tests-ext: bat
        - os: windows-latest
          python-version: pypy-3.9
          run-tests-ext: bat
    steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v3
     - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: ${{ matrix.python-version }}
     - name: Install pytest
       run: pip install pytest
     - name: Run tests
       continue-on-error: False
-      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} core
-    # Linter is in quick-test
+      run: |
+        python3 -m yt_dlp -v || true # Print debug head
+        ./devscripts/run_tests.${{ matrix.run-tests-ext }} core
13 .github/workflows/download.yml vendored

@@ -1,14 +1,17 @@
 name: Download Tests
 on: [push, pull_request]
+permissions:
+  contents: read
+
 jobs:
   quick:
     name: Quick Download Tests
     if: "contains(github.event.head_commit.message, 'ci run dl')"
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: 3.9
     - name: Install test requirements
@@ -25,7 +28,7 @@ jobs:
       fail-fast: true
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.6', '3.7', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8]
+        python-version: ['3.7', '3.10', 3.11-dev, pypy-3.7, pypy-3.8]
         run-tests-ext: [sh]
         include:
         # atleast one of each CPython/PyPy tests must be in windows
@@ -36,9 +39,9 @@ jobs:
           python-version: pypy-3.9
           run-tests-ext: bat
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v3
     - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: ${{ matrix.python-version }}
     - name: Install pytest
22 .github/workflows/quick-test.yml vendored

@@ -1,30 +1,32 @@
 name: Quick Test
 on: [push, pull_request]
+permissions:
+  contents: read
+
 jobs:
   tests:
     name: Core Test
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python
-      uses: actions/setup-python@v2
+    - uses: actions/checkout@v3
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v4
       with:
-        python-version: 3.9
+        python-version: '3.11'
     - name: Install test requirements
       run: pip install pytest pycryptodomex
     - name: Run tests
-      run: ./devscripts/run_tests.sh core
+      run: |
+        python3 -m yt_dlp -v || true
+        ./devscripts/run_tests.sh core
   flake8:
     name: Linter
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: 3.9
+    - uses: actions/checkout@v3
+    - uses: actions/setup-python@v4
     - name: Install flake8
       run: pip install flake8
     - name: Make lazy extractors
15 .gitignore vendored

@@ -30,16 +30,18 @@ cookies
 *.f4v
 *.flac
 *.flv
+*.gif
 *.jpeg
 *.jpg
 *.m4a
-*.mpga
 *.m4v
 *.mhtml
 *.mkv
 *.mov
 *.mp3
 *.mp4
+*.mpga
+*.oga
 *.ogg
 *.opus
 *.png
@@ -47,6 +49,7 @@ cookies
 *.srt
 *.swf
 *.swp
+*.tt
 *.ttml
 *.url
 *.vtt
@@ -69,6 +72,7 @@ dist/
 zip/
 tmp/
 venv/
+.venv/
 completions/
 
 # Misc
@@ -85,6 +89,7 @@ updates_key.pem
 .tox
 *.class
 *.isorted
+*.stackdump
 
 # Generated
 AUTHORS
@@ -116,9 +121,5 @@ yt-dlp.zip
 */extractor/lazy_extractors.py
 
 # Plugins
-ytdlp_plugins/extractor/*
-!ytdlp_plugins/extractor/__init__.py
-!ytdlp_plugins/extractor/sample.py
-ytdlp_plugins/postprocessor/*
-!ytdlp_plugins/postprocessor/__init__.py
-!ytdlp_plugins/postprocessor/sample.py
+ytdlp_plugins/
+yt-dlp-plugins
CONTRIBUTING.md

@@ -161,7 +161,7 @@ The same applies for changes to the documentation, code style, or overarching ch
 
 ## Adding support for a new site
 
-If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](https://www.github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. yt-dlp does **not support** such sites thus pull requests adding support for them **will be rejected**.
+If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](#is-the-website-primarily-used-for-piracy)**. yt-dlp does **not support** such sites thus pull requests adding support for them **will be rejected**.
 
 After you have ensured this site is distributing its content legally, you can follow this quick list (assuming your service is called `yourextractor`):
 
@@ -195,7 +195,7 @@ After you have ensured this site is distributing its content legally, you can fo
             # * A value
             # * MD5 checksum; start the string with md5:
             # * A regular expression; start the string with re:
-            # * Any Python type (for example int or float)
+            # * Any Python type, e.g. int or float
         }
     }]
 
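To make the `md5:`/`re:`/type conventions in the hunk above concrete, here is a minimal, hypothetical `_TESTS` entry; the URL and every value below are invented for illustration:

```python
_TESTS = [{
    'url': 'https://yourextractor.com/watch/42',  # hypothetical URL
    'info_dict': {
        'id': '42',
        'ext': 'mp4',
        'title': 'Some video title',  # a plain value is compared for equality
        'description': 'md5:098f6bcd4621d373cade4e832627b4f6',  # md5: compares the MD5 of the extracted value
        'uploader': 're:^[Yy]our ?[Ee]xtractor$',  # re: matches against a regular expression
        'view_count': int,  # a bare Python type checks only the type
    },
}]
```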
@@ -222,7 +222,7 @@ After you have ensured this site is distributing its content legally, you can fo
 
         $ flake8 yt_dlp/extractor/yourextractor.py
 
-1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatibility is not required for even older versions of Python.
+1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.7 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
 
         $ git add yt_dlp/extractor/_extractors.py
@@ -261,7 +261,7 @@ The aforementioned metafields are the critical data that the extraction does not
 
 For pornographic sites, appropriate `age_limit` must also be returned.
 
-The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - Eg: when the video is a live stream that has not started yet.
+The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.
 
 [Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
 
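A minimal sketch of the live-stream special case mentioned above, using common yt-dlp info-dict field names; all values are hypothetical:

```python
# An info dict for a live stream that has not started yet -- no 'url' and
# no 'formats', but still usable with --ignore-no-formats-error
info_dict = {
    'id': 'abc123',
    'title': 'Upcoming live stream',
    'live_status': 'is_upcoming',     # signals that the stream has not started
    'release_timestamp': 1672531200,  # scheduled start time, if known
}
```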
@@ -351,8 +351,9 @@ Say you extracted a list of thumbnails into `thumbnail_data` and want to iterate
 ```python
 thumbnail_data = data.get('thumbnails') or []
 thumbnails = [{
-    'url': item['url']
-} for item in thumbnail_data] # correct
+    'url': item['url'],
+    'height': item.get('h'),
+} for item in thumbnail_data if item.get('url')] # correct
 ```
 
 and not like:
@@ -360,12 +361,27 @@ and not like:
 ```python
 thumbnail_data = data.get('thumbnails')
 thumbnails = [{
-    'url': item['url']
+    'url': item['url'],
+    'height': item.get('h'),
 } for item in thumbnail_data] # incorrect
 ```
 
 In this case, `thumbnail_data` will be `None` if the field was not found and this will cause the loop `for item in thumbnail_data` to raise a fatal error. Using `or []` avoids this error and results in setting an empty list in `thumbnails` instead.
 
+Alternately, this can be further simplified by using `traverse_obj`
+
+```python
+thumbnails = [{
+    'url': item['url'],
+    'height': item.get('h'),
+} for item in traverse_obj(data, ('thumbnails', lambda _, v: v['url']))]
+```
+
+or, even better,
+
+```python
+thumbnails = traverse_obj(data, ('thumbnails', ..., {'url': 'url', 'height': 'h'}))
+```
+
 ### Provide fallbacks
 
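For illustration, the `traverse_obj` filter pattern introduced above behaves like this on hypothetical data (the second thumbnail lacks a `url`, so the filter drops it):

```python
from yt_dlp.utils import traverse_obj

# Hypothetical API response; the second entry has no 'url'
data = {'thumbnails': [{'url': 'https://example.com/1.jpg', 'h': 360}, {'h': 720}]}

thumbnails = [{
    'url': item['url'],
    'height': item.get('h'),
} for item in traverse_obj(data, ('thumbnails', lambda _, v: v['url']))]
# -> [{'url': 'https://example.com/1.jpg', 'height': 360}]
```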
90 CONTRIBUTORS

@@ -3,6 +3,7 @@ shirt-dev (collaborator)
 coletdjnz/colethedj (collaborator)
 Ashish0804 (collaborator)
 nao20010128nao/Lesmiscore (collaborator)
+bashonly (collaborator)
 h-h-h-h
 pauldubois98
 nixxo
@@ -285,3 +286,92 @@ odo2063
 pritam20ps05
 scy
 sheerluck
+AxiosDeminence
+DjesonPV
+eren-kemer
+freezboltz
+Galiley
+haobinliang
+Mehavoid
+winterbird-code
+yashkc2025
+aldoridhoni
+jacobtruman
+masta79
+palewire
+cgrigis
+DavidH-2022
+dfaker
+jackyyf
+ohaiibuzzle
+SamantazFox
+shreyasminocha
+tejasa97
+xenov
+satan1st
+0xGodspeed
+5736d79
+587021c
+basrieter
+Bobscorn
+CNugteren
+columndeeply
+DoubleCouponDay
+Fabi019
+GautamMKGarg
+Grub4K
+itachi-19
+jeroenj
+josanabr
+LiviaMedeiros
+nikita-moor
+snapdgn
+SuperSonicHub1
+tannertechnology
+Timendum
+tobi1805
+TokyoBlackHole
+ajayyy
+Alienmaster
+bsun0000
+changren-wcr
+ClosedPort22
+CrankDatSouljaBoy
+cruel-efficiency
+endotronic
+Generator
+gibson042
+How-Bout-No
+invertico
+jahway603
+jwoglom
+lksj
+megapro17
+mlampe
+MrOctopus
+nosoop
+puc9
+sashashura
+schnusch
+SG5
+the-marenga
+tkgmomosheep
+vitkhab
+glensc
+synthpop123
+tntmod54321
+milkknife
+Bnyro
+CapacitorSet
+stelcodes
+skbeh
+muddi900
+digitall
+chengzhicn
+mexus
+JChris246
+redraskal
+Spicadox
+barsnick
+docbender
+KurtBestor
543 Changelog.md

@@ -11,6 +11,543 @@
 -->
 
+
+## 2023.01.02
+
+* **Improve plugin architecture** by [Grub4K](https://github.com/Grub4K), [coletdjnz](https://github.com/coletdjnz), [flashdagger](https://github.com/flashdagger), [pukkandan](https://github.com/pukkandan)
+    * Plugins can be loaded in any distribution of yt-dlp (binary, pip, source, etc.) and can be distributed and installed as packages. See [the readme](https://github.com/yt-dlp/yt-dlp/tree/05997b6e98e638d97d409c65bb5eb86da68f3b64#plugins) for more information
+* Add `--compat-options 2021,2022`
+    * This allows devs to change defaults and make other potentially breaking changes more easily. If you need everything to work exactly as-is, put `--compat 2022` in your config to guard against future compat changes.
+* [downloader/aria2c] Native progress for aria2c via RPC by [Lesmiscore](https://github.com/Lesmiscore), [pukkandan](https://github.com/pukkandan)
+* Merge youtube-dl: Upto [commit/195f22f](https://github.com/ytdl-org/youtube-dl/commit/195f22f6) by [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan)
+* Add pre-processor stage `video`
+* Let `--parse/replace-in-metadata` run at any post-processing stage
+* Add `--enable-file-urls` by [coletdjnz](https://github.com/coletdjnz)
+* Add new field `aspect_ratio`
+* Add `ac4` to known codecs
+* Add `weba` to known extensions
+* [FFmpegVideoConvertor] Add `gif` to `--recode-video`
+* Add message when there are no subtitles/thumbnails
+* Deprioritize HEVC-over-FLV formats by [Lesmiscore](https://github.com/Lesmiscore)
+* Make early reject of `--match-filter` stricter
+* Fix `--cookies-from-browser` CLI parsing
+* Fix `original_url` in playlists
+* Fix bug in writing playlist info-json
+* Fix bugs in `PlaylistEntries`
+* [downloader/ffmpeg] Fix headers for video+audio formats by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
+* [extractor] Add a way to distinguish IEs that returns only videos
+* [extractor] Implement universal format sorting and deprecate `_sort_formats`
+* [extractor] Let `_extract_format` functions obey `--ignore-no-formats`
+* [extractor/generic] Add `fragment_query` extractor arg for DASH and HLS by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
+* [extractor/generic] Decode unicode-escaped embed URLs by [bashonly](https://github.com/bashonly)
+* [extractor/generic] Don't report redirect to https
+* [extractor/generic] Fix JSON LD manifest extraction by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
+* [extractor/generic] Use `Accept-Encoding: identity` for initial request by [coletdjnz](https://github.com/coletdjnz)
+* [FormatSort] Add `mov` to `vext`
+* [jsinterp] Escape regex that looks like nested set
+* [webvtt] Handle premature EOF by [flashdagger](https://github.com/flashdagger)
+* [utils] `classproperty`: Add cache support
+* [utils] `get_exe_version`: Detect broken executables by [dirkf](https://github.com/dirkf), [pukkandan](https://github.com/pukkandan)
+* [utils] `js_to_json`: Fix bug in [f55523c](https://github.com/yt-dlp/yt-dlp/commit/f55523c) by [ChillingPepper](https://github.com/ChillingPepper), [pukkandan](https://github.com/pukkandan)
+* [utils] Make `ExtractorError` mutable
+* [utils] Move `FileDownloader.parse_bytes` into utils
+* [utils] Move format sorting code into `utils`
+* [utils] `windows_enable_vt_mode`: Proper implementation by [Grub4K](https://github.com/Grub4K)
+* [update] Workaround [#5632](https://github.com/yt-dlp/yt-dlp/issues/5632)
+* [docs] Improvements
+* [cleanup] Misc fixes and cleanup
+* [cleanup] Use `random.choices` by [freezboltz](https://github.com/freezboltz)
+* [extractor/airtv] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/amazonminitv] Add extractors by [GautamMKGarg](https://github.com/GautamMKGarg), [nyuszika7h](https://github.com/nyuszika7h)
+* [extractor/beatbump] Add extractors by [Bobscorn](https://github.com/Bobscorn), [pukkandan](https://github.com/pukkandan)
+* [extractor/europarl] Add EuroParlWebstream extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/kanal2] Add extractor by [bashonly](https://github.com/bashonly), [glensc](https://github.com/glensc), [pukkandan](https://github.com/pukkandan)
+* [extractor/kankanews] Add extractor by [synthpop123](https://github.com/synthpop123)
+* [extractor/kick] Add extractor by [bashonly](https://github.com/bashonly)
+* [extractor/mediastream] Add extractor by [HobbyistDev](https://github.com/HobbyistDev), [elyse0](https://github.com/elyse0)
+* [extractor/noice] Add NoicePodcast extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/oneplace] Add OnePlacePodcast extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/rumble] Add RumbleIE extractor by [flashdagger](https://github.com/flashdagger)
+* [extractor/screencastify] Add extractor by [bashonly](https://github.com/bashonly)
+* [extractor/trtcocuk] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/Veoh] Add user extractor by [tntmod54321](https://github.com/tntmod54321)
+* [extractor/videoken] Add extractors by [bashonly](https://github.com/bashonly)
+* [extractor/webcamerapl] Add extractor by [milkknife](https://github.com/milkknife)
+* [extractor/amazon] Add `AmazonReviews` extractor by [bashonly](https://github.com/bashonly)
+* [extractor/netverse] Add `NetverseSearch` extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/vimeo] Add `VimeoProIE` by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
+* [extractor/xiami] Remove extractors by [synthpop123](https://github.com/synthpop123)
+* [extractor/youtube] Add `piped.video` by [Bnyro](https://github.com/Bnyro)
+* [extractor/youtube] Consider language in format de-duplication
+* [extractor/youtube] Extract DRC formats
+* [extractor/youtube] Fix `ytuser:`
+* [extractor/youtube] Fix bug in handling of music URLs
+* [extractor/youtube] Subtitles cannot be translated to `und`
+* [extractor/youtube:tab] Extract metadata from channel items by [coletdjnz](https://github.com/coletdjnz)
+* [extractor/ARD] Add vtt subtitles by [CapacitorSet](https://github.com/CapacitorSet)
+* [extractor/ArteTV] Extract chapters by [bashonly](https://github.com/bashonly), [iw0nderhow](https://github.com/iw0nderhow)
+* [extractor/bandcamp] Add `album_artist` by [stelcodes](https://github.com/stelcodes)
+* [extractor/bilibili] Fix `--no-playlist` for anthology
+* [extractor/bilibili] Improve `_VALID_URL` by [skbeh](https://github.com/skbeh)
+* [extractor/biliintl:series] Make partial download of series faster
+* [extractor/BiliLive] Fix extractor
+* [extractor/brightcove] Add `BrightcoveNewBaseIE` and fix embed extraction
+* [extractor/cda] Support premium and misc improvements by [selfisekai](https://github.com/selfisekai)
+* [extractor/ciscowebex] Support password-protected videos by [damianoamatruda](https://github.com/damianoamatruda)
+* [extractor/curiositystream] Fix auth by [mnn](https://github.com/mnn)
+* [extractor/embedly] Handle vimeo embeds
+* [extractor/fifa] Fix Preplay extraction by [dirkf](https://github.com/dirkf)
+* [extractor/foxsports] Fix extractor by [bashonly](https://github.com/bashonly)
+* [extractor/gronkh] Fix `_VALID_URL` by [muddi900](https://github.com/muddi900)
+* [extractor/hotstar] Improve format metadata
+* [extractor/iqiyi] Fix `Iq` JS regex by [bashonly](https://github.com/bashonly)
+* [extractor/la7] Improve extractor by [nixxo](https://github.com/nixxo)
+* [extractor/mediaset] Better embed detection and error messages by [nixxo](https://github.com/nixxo)
+* [extractor/mixch] Support `--wait-for-video`
+* [extractor/naver] Improve `_VALID_URL` for `NaverNowIE` by [bashonly](https://github.com/bashonly)
+* [extractor/naver] Treat fan subtitles as separate language
+* [extractor/netverse] Extract comments by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/nosnl] Add support for /video by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/odnoklassniki] Extract subtitles by [bashonly](https://github.com/bashonly)
+* [extractor/pinterest] Fix extractor by [bashonly](https://github.com/bashonly)
+* [extractor/plutotv] Fix videos with non-zero start by [digitall](https://github.com/digitall)
+* [extractor/polskieradio] Adapt to next.js redesigns by [selfisekai](https://github.com/selfisekai)
+* [extractor/reddit] Add vcodec to fallback format by [chengzhicn](https://github.com/chengzhicn)
+* [extractor/reddit] Extract crossposted media by [bashonly](https://github.com/bashonly)
+* [extractor/reddit] Extract video embeds in text posts by [bashonly](https://github.com/bashonly)
+* [extractor/rutube] Support private videos by [mexus](https://github.com/mexus)
+* [extractor/sibnet] Separate from VKIE
+* [extractor/slideslive] Fix extractor by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
+* [extractor/slideslive] Support embeds and slides by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
+* [extractor/soundcloud] Support user permalink by [nosoop](https://github.com/nosoop)
+* [extractor/spankbang] Fix extractor by [JChris246](https://github.com/JChris246)
+* [extractor/stv] Detect DRM
+* [extractor/swearnet] Fix description bug
+* [extractor/tencent] Fix geo-restricted video by [elyse0](https://github.com/elyse0)
+* [extractor/tiktok] Fix subs, `DouyinIE`, improve `_VALID_URL` by [bashonly](https://github.com/bashonly)
+* [extractor/tiktok] Update `_VALID_URL`, add `api_hostname` arg by [bashonly](https://github.com/bashonly)
+* [extractor/tiktok] Update API hostname by [redraskal](https://github.com/redraskal)
+* [extractor/twitcasting] Fix videos with password by [Spicadox](https://github.com/Spicadox), [bashonly](https://github.com/bashonly)
+* [extractor/twitter] Heed `--no-playlist` for multi-video tweets by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
+* [extractor/twitter] Refresh guest token when expired by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
+* [extractor/twitter:spaces] Add `Referer` to m3u8 by [nixxo](https://github.com/nixxo)
+* [extractor/udemy] Fix lectures that have no URL and detect DRM
+* [extractor/unsupported] Add more URLs
+* [extractor/urplay] Support for audio-only formats by [barsnick](https://github.com/barsnick)
+* [extractor/wistia] Improve extension detection by [Grub4k](https://github.com/Grub4k), [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
+* [extractor/yle_areena] Support restricted videos by [docbender](https://github.com/docbender)
+* [extractor/youku] Fix extractor by [KurtBestor](https://github.com/KurtBestor)
+* [extractor/youporn] Fix metadata by [marieell](https://github.com/marieell)
+* [extractor/redgifs] Fix bug in [8c188d5](https://github.com/yt-dlp/yt-dlp/commit/8c188d5d09177ed213a05c900d3523867c5897fd)
+
+
+### 2022.11.11
+
+* Merge youtube-dl: Upto [commit/de39d12](https://github.com/ytdl-org/youtube-dl/commit/de39d128)
+* Backport SSL configuration from Python 3.10 by [coletdjnz](https://github.com/coletdjnz)
+* Do more processing in `--flat-playlist`
+* Fix `--list` options not implying `-s` in some cases by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
+* Fix end time of clips by [cruel-efficiency](https://github.com/cruel-efficiency)
+* Fix for `formats=None`
+* Write API params in debug head
+* [outtmpl] Ensure ASCII in json and add option for Unicode
+* [SponsorBlock] Add `type` field, obey `--retry-sleep extractor`, relax duration check for large segments
+* [SponsorBlock] **Support `chapter` category** by [ajayyy](https://github.com/ajayyy), [pukkandan](https://github.com/pukkandan)
+* [ThumbnailsConvertor] Fix filename escaping by [dirkf](https://github.com/dirkf), [pukkandan](https://github.com/pukkandan)
+* [ModifyChapters] Handle the entire video being marked for removal
+* [embedthumbnail] Fix thumbnail name in mp3 by [How-Bout-No](https://github.com/How-Bout-No)
+* [downloader/fragment] HLS download can continue without first fragment
+* [cookies] Improve `LenientSimpleCookie` by [Grub4K](https://github.com/Grub4K)
+* [jsinterp] Improve separating regex
+* [extractor/common] Fix `fatal=False` for `_search_nuxt_data`
+* [extractor/common] Improve `_generic_title`
+* [extractor/common] Fix `json_ld` type checks by [Grub4K](https://github.com/Grub4K)
+* [extractor/generic] Separate embed extraction into own function
+* [extractor/generic:quoted-html] Add extractor by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
+* [extractor/unsupported] Raise error on known DRM-only sites by [coletdjnz](https://github.com/coletdjnz)
+* [utils] `js_to_json`: Improve escape handling by [Grub4K](https://github.com/Grub4K)
+* [utils] `strftime_or_none`: Workaround Python bug on Windows
+* [utils] `traverse_obj`: Always return list when branching, allow `re.Match` objects by [Grub4K](https://github.com/Grub4K)
+* [build, test] Harden workflows' security by [sashashura](https://github.com/sashashura)
+* [build] `py2exe`: Migrate to freeze API by [SG5](https://github.com/SG5), [pukkandan](https://github.com/pukkandan)
+* [build] Create `armv7l` and `aarch64` releases by [MrOctopus](https://github.com/MrOctopus), [pukkandan](https://github.com/pukkandan)
+* [build] Make linux binary truly standalone using `conda` by [mlampe](https://github.com/mlampe)
+* [build] Replace `set-output` with `GITHUB_OUTPUT` by [Lesmiscore](https://github.com/Lesmiscore)
+* [update] Use error code `100` for update errors
+* [compat] Fix `shutils.move` in restricted ACL mode on BSD by [ClosedPort22](https://github.com/ClosedPort22), [pukkandan](https://github.com/pukkandan)
+* [docs, devscripts] Document `pyinst`'s argument passthrough by [jahway603](https://github.com/jahway603)
+* [test] Allow `extract_flat` in download tests by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
+* [cleanup] Misc fixes and cleanup by [pukkandan](https://github.com/pukkandan), [Alienmaster](https://github.com/Alienmaster)
+* [extractor/aeon] Add extractor by [DoubleCouponDay](https://github.com/DoubleCouponDay)
+* [extractor/agora] Add extractors by [selfisekai](https://github.com/selfisekai)
+* [extractor/camsoda] Add extractor by [zulaport](https://github.com/zulaport)
+* [extractor/cinetecamilano] Add extractor by [timendum](https://github.com/timendum)
+* [extractor/deuxm] Add extractors by [CrankDatSouljaBoy](https://github.com/CrankDatSouljaBoy)
+* [extractor/genius] Add extractors by [bashonly](https://github.com/bashonly)
+* [extractor/japandiet] Add extractors by [Lesmiscore](https://github.com/Lesmiscore)
+* [extractor/listennotes] Add extractor by [lksj](https://github.com/lksj), [pukkandan](https://github.com/pukkandan)
+* [extractor/nos.nl] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/oftv] Add extractors by [DoubleCouponDay](https://github.com/DoubleCouponDay)
+* [extractor/podbayfm] Add extractor by [schnusch](https://github.com/schnusch)
+* [extractor/qingting] Add extractor by [bashonly](https://github.com/bashonly), [changren-wcr](https://github.com/changren-wcr)
+* [extractor/screen9] Add extractor by [tpikonen](https://github.com/tpikonen)
+* [extractor/swearnet] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
+* [extractor/YleAreena] Add extractor by [pukkandan](https://github.com/pukkandan), [vitkhab](https://github.com/vitkhab)
+* [extractor/zeenews] Add extractor by [m4tu4g](https://github.com/m4tu4g), [pukkandan](https://github.com/pukkandan)
+* [extractor/youtube:tab] **Update tab handling for redesign** by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
+    * Channel URLs download all uploads of the channel as multiple playlists, separated by tab
+* [extractor/youtube] Differentiate between no comments and disabled comments by [coletdjnz](https://github.com/coletdjnz)
+* [extractor/youtube] Extract `concurrent_view_count` for livestreams by [coletdjnz](https://github.com/coletdjnz)
+* [extractor/youtube] Fix `duration` for premieres by [nosoop](https://github.com/nosoop)
+* [extractor/youtube] Fix `live_status` by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
+* [extractor/youtube] Ignore incomplete data error for comment replies by [coletdjnz](https://github.com/coletdjnz)
+* [extractor/youtube] Improve chapter parsing from description
+* [extractor/youtube] Mark videos as fully watched by [bsun0000](https://github.com/bsun0000)
+* [extractor/youtube] Update piped instances by [Generator](https://github.com/Generator)
+* [extractor/youtube] Update playlist metadata extraction for new layout by [coletdjnz](https://github.com/coletdjnz)
+* [extractor/youtube:tab] Fix video metadata from tabs by [coletdjnz](https://github.com/coletdjnz)
+* [extractor/youtube:tab] Let `approximate_date` return timestamp
+* [extractor/americastestkitchen] Fix extractor by [bashonly](https://github.com/bashonly)
+* [extractor/bbc] Support onion domains by [DoubleCouponDay](https://github.com/DoubleCouponDay)
+* [extractor/bilibili] Add chapters and misc cleanup by [lockmatrix](https://github.com/lockmatrix), [pukkandan](https://github.com/pukkandan)
+* [extractor/bilibili] Fix BilibiliIE and Bangumi extractors by [lockmatrix](https://github.com/lockmatrix), [pukkandan](https://github.com/pukkandan)
+* [extractor/bitchute] Better error for geo-restricted videos by [flashdagger](https://github.com/flashdagger)
+* [extractor/bitchute] Improve `BitChuteChannelIE` by [flashdagger](https://github.com/flashdagger), [pukkandan](https://github.com/pukkandan)
+* [extractor/bitchute] Simplify extractor by [flashdagger](https://github.com/flashdagger), [pukkandan](https://github.com/pukkandan)
+* [extractor/cda] Support login through API by [selfisekai](https://github.com/selfisekai)
+* [extractor/crunchyroll] Beta is now the only layout by [tejing1](https://github.com/tejing1)
+* [extractor/detik] Avoid unnecessary extraction
+* [extractor/doodstream] Remove extractor
+* [extractor/dplay] Add MotorTrendOnDemand extractor by [bashonly](https://github.com/bashonly)
+* [extractor/epoch] Support videos without data-trailer by [gibson042](https://github.com/gibson042), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/fox] Extract thumbnail by [vitkhab](https://github.com/vitkhab)
|
||||||
|
* [extractor/foxnews] Add `FoxNewsVideo` extractor
|
||||||
|
* [extractor/hotstar] Add season support by [m4tu4g](https://github.com/m4tu4g)
|
||||||
|
* [extractor/hotstar] Refactor v1 API calls
|
||||||
|
* [extractor/iprima] Make json+ld non-fatal by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/iq] Increase phantomjs timeout
|
||||||
|
* [extractor/kaltura] Support playlists by [jwoglom](https://github.com/jwoglom), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/lbry] Authenticate with cookies by [flashdagger](https://github.com/flashdagger)
|
||||||
|
* [extractor/livestreamfails] Support posts by [invertico](https://github.com/invertico)
|
||||||
|
* [extractor/mlb] Add `MLBArticle` extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/mxplayer] Improve extractor by [m4tu4g](https://github.com/m4tu4g)
|
||||||
|
* [extractor/niconico] Always use HTTPS for requests
|
||||||
|
* [extractor/nzherald] Support new video embed by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/odnoklassniki] Support boosty.to embeds by [Lesmiscore](https://github.com/Lesmiscore), [megapro17](https://github.com/megapro17), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/paramountplus] Update API token by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/reddit] Add fallback format by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/redgifs] Fix extractors by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/redgifs] Refresh auth token for 401 by [endotronic](https://github.com/endotronic), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/rumble] Add HLS formats and extract more metadata by [flashdagger](https://github.com/flashdagger)
|
||||||
|
* [extractor/sbs] Improve `_VALID_URL` by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/skyit] Fix extractors by [nixxo](https://github.com/nixxo)
|
||||||
|
* [extractor/stripchat] Fix hostname for HLS stream by [zulaport](https://github.com/zulaport)
|
||||||
|
* [extractor/stripchat] Improve error message by [freezboltz](https://github.com/freezboltz)
|
||||||
|
* [extractor/telegram] Add playlist support and more metadata by [bashonly](https://github.com/bashonly), [bsun0000](https://github.com/bsun0000)
|
||||||
|
* [extractor/Tnaflix] Fix for HTTP 500 by [SG5](https://github.com/SG5), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/tubitv] Better DRM detection by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/tvp] Update extractors by [selfisekai](https://github.com/selfisekai)
|
||||||
|
* [extractor/twitcasting] Fix `data-movie-playlist` extraction by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/twitter] Add onion site to `_VALID_URL` by [DoubleCouponDay](https://github.com/DoubleCouponDay)
|
||||||
|
* [extractor/twitter] Add Spaces extractor and GraphQL API by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly), [nixxo](https://github.com/nixxo), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/twitter] Support multi-video posts by [Grub4K](https://github.com/Grub4K)
|
||||||
|
* [extractor/uktvplay] Fix `_VALID_URL`
|
||||||
|
* [extractor/viu] Support subtitles of on-screen text by [tkgmomosheep](https://github.com/tkgmomosheep)
|
||||||
|
* [extractor/VK] Fix playlist URLs by [the-marenga](https://github.com/the-marenga)
|
||||||
|
* [extractor/vlive] Extract `release_timestamp`
|
||||||
|
* [extractor/voot] Improve `_VALID_URL` by [freezboltz](https://github.com/freezboltz)
|
||||||
|
* [extractor/wordpress:mb.miniAudioPlayer] Add embed extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/YoutubeWebArchive] Improve metadata extraction by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/zee5] Improve `_VALID_URL` by [m4tu4g](https://github.com/m4tu4g)
|
||||||
|
* [extractor/zenyandex] Fix extractors by [lksj](https://github.com/lksj), [puc9](https://github.com/puc9), [pukkandan](https://github.com/pukkandan)
|
||||||
|
|
||||||
|
|
||||||
|
### 2022.10.04
|
||||||
|
|
||||||
|
* Allow a `set` to be passed as `download_archive` by [pukkandan](https://github.com/pukkandan), [bashonly](https://github.com/bashonly)
|
||||||
|
* Allow open ranges for time ranges by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* Allow plugin extractors to replace the built-in ones
|
||||||
|
* Don't download the entire video when no `--download-sections` match
|
||||||
|
* Fix `--config-location -`
|
||||||
|
* Improve [5736d79](https://github.com/yt-dlp/yt-dlp/pull/5044/commits/5736d79172c47ff84740d5720467370a560febad)
|
||||||
|
* Fix for when playlists don't have `webpage_url`
|
||||||
|
* Support environment variables in `--ffmpeg-location`
|
||||||
|
* Workaround `libc_ver` not being available on the Windows Store version of Python
|
||||||
|
* [outtmpl] Curly braces to filter keys by [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [outtmpl] Make `%s` work in strfformat for all systems
|
||||||
|
* [jsinterp] Workaround operator associativity issue
|
||||||
|
* [cookies] Let `_get_mac_keyring_password` fail gracefully
|
||||||
|
* [cookies] Parse cookies leniently by [Grub4K](https://github.com/Grub4K)
|
||||||
|
* [phantomjs] Fix bug in [587021c](https://github.com/yt-dlp/yt-dlp/commit/587021cd9f717181b44e881941aca3f8d753758b) by [elyse0](https://github.com/elyse0)
|
||||||
|
* [downloader/aria2c] Fix filename containing leading whitespace by [std-move](https://github.com/std-move)
|
||||||
|
* [downloader/ism] Support ec-3 codec by [nixxo](https://github.com/nixxo)
|
||||||
|
* [extractor] Fix `fatal=False` in `RetryManager`
|
||||||
|
* [extractor] Improve json-ld extraction
|
||||||
|
* [extractor] Make `_search_json` able to parse lists
|
||||||
|
* [extractor] Escape `%` in `representation_id` of m3u8
|
||||||
|
* [extractor/generic] Pass through referer from json-ld
|
||||||
|
* [utils] `base_url`: URL paths can contain `&` by [elyse0](https://github.com/elyse0)
|
||||||
|
* [utils] `js_to_json`: Improve
|
||||||
|
* [utils] `Popen.run`: Fix default return in binary mode
|
||||||
|
* [utils] `traverse_obj`: Rewrite, document and add tests by [Grub4K](https://github.com/Grub4K)
|
||||||
|
* [devscripts] `make_lazy_extractors`: Fix for Docker by [josanabr](https://github.com/josanabr)
|
||||||
|
* [docs] Misc Improvements
|
||||||
|
* [cleanup] Misc fixes and cleanup by [pukkandan](https://github.com/pukkandan), [gamer191](https://github.com/gamer191)
|
||||||
|
* [extractor/24tv.ua] Add extractors by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/BerufeTV] Add extractor by [Fabi019](https://github.com/Fabi019)
|
||||||
|
* [extractor/booyah] Add extractor by [HobbyistDev](https://github.com/HobbyistDev), [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/bundesliga] Add extractor by [Fabi019](https://github.com/Fabi019)
|
||||||
|
* [extractor/GoPlay] Add extractor by [CNugteren](https://github.com/CNugteren), [basrieter](https://github.com/basrieter), [jeroenj](https://github.com/jeroenj)
|
||||||
|
* [extractor/iltalehti] Add extractor by [tpikonen](https://github.com/tpikonen)
|
||||||
|
* [extractor/IsraelNationalNews] Add extractor by [Bobscorn](https://github.com/Bobscorn)
|
||||||
|
* [extractor/mediaworksnzvod] Add extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/MicrosoftEmbed] Add extractor by [DoubleCouponDay](https://github.com/DoubleCouponDay)
|
||||||
|
* [extractor/nbc] Add NBCStations extractor by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/onenewsnz] Add extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/prankcast] Add extractor by [HobbyistDev](https://github.com/HobbyistDev), [columndeeply](https://github.com/columndeeply)
|
||||||
|
* [extractor/Smotrim] Add extractor by [Lesmiscore](https://github.com/Lesmiscore), [nikita-moor](https://github.com/nikita-moor)
|
||||||
|
* [extractor/tencent] Add Iflix extractor by [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/unscripted] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/adobepass] Add MSO AlticeOne (Optimum TV) by [CplPwnies](https://github.com/CplPwnies)
|
||||||
|
* [extractor/youtube] **Download `post_live` videos from start** by [Lesmiscore](https://github.com/Lesmiscore), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/youtube] Add support for Shorts audio pivot feed by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/youtube] Detect `lazy-load-for-videos` embeds
|
||||||
|
* [extractor/youtube] Do not warn on duplicate chapters
|
||||||
|
* [extractor/youtube] Fix video like count extraction by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/youtube] Support changing extraction language by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/youtube:tab] Improve continuation items extraction
|
||||||
|
* [extractor/youtube:tab] Support `reporthistory` page
|
||||||
|
* [extractor/amazonstore] Fix JSON extraction by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/amazonstore] Retry to avoid captcha page by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/animeondemand] Remove extractor by [TokyoBlackHole](https://github.com/TokyoBlackHole)
|
||||||
|
* [extractor/anvato] Fix extractor and refactor by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/artetv] Remove duplicate stream urls by [Grub4K](https://github.com/Grub4K)
|
||||||
|
* [extractor/audioboom] Support direct URLs and refactor by [pukkandan](https://github.com/pukkandan), [tpikonen](https://github.com/tpikonen)
|
||||||
|
* [extractor/bandcamp] Extract `uploader_url`
|
||||||
|
* [extractor/bilibili] Add space.bilibili extractors by [lockmatrix](https://github.com/lockmatrix)
|
||||||
|
* [extractor/BilibiliSpace] Fix extractor and better error message by [lockmatrix](https://github.com/lockmatrix)
|
||||||
|
* [extractor/BiliIntl] Support uppercase lang in `_VALID_URL` by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/BiliIntlSeries] Fix `_VALID_URL`
|
||||||
|
* [extractor/bongacams] Update `_VALID_URL` by [0xGodspeed](https://github.com/0xGodspeed)
|
||||||
|
* [extractor/crunchyroll:beta] Improve handling of hardsubs by [Grub4K](https://github.com/Grub4K)
|
||||||
|
* [extractor/detik] Generalize extractors by [HobbyistDev](https://github.com/HobbyistDev), [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/dplay:italy] Add default authentication by [Timendum](https://github.com/Timendum)
|
||||||
|
* [extractor/heise] Fix extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/holodex] Fix `_VALID_URL` by [LiviaMedeiros](https://github.com/LiviaMedeiros)
|
||||||
|
* [extractor/hrfensehen] Fix extractor by [snapdgn](https://github.com/snapdgn)
|
||||||
|
* [extractor/hungama] Add subtitle by [GautamMKGarg](https://github.com/GautamMKGarg), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/instagram] Extract more metadata by [pritam20ps05](https://github.com/pritam20ps05)
|
||||||
|
* [extractor/JWPlatform] Fix extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/malltv] Fix video_id extraction by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/MLBTV] Detect live streams
|
||||||
|
* [extractor/motorsport] Support native embeds
|
||||||
|
* [extractor/Mxplayer] Fix extractor by [itachi-19](https://github.com/itachi-19)
|
||||||
|
* [extractor/nebula] Add nebula.tv by [tannertechnology](https://github.com/tannertechnology)
|
||||||
|
* [extractor/nfl] Fix extractor by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/ondemandkorea] Update `jw_config` regex by [julien-hadleyjack](https://github.com/julien-hadleyjack)
|
||||||
|
* [extractor/paramountplus] Better DRM detection by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/patreon] Sort formats
|
||||||
|
* [extractor/rcs] Fix embed extraction by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/redgifs] Fix extractor by [jhwgh1968](https://github.com/jhwgh1968)
|
||||||
|
* [extractor/rutube] Fix `_EMBED_REGEX` by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/RUTV] Fix warnings for livestreams by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/soundcloud:search] More metadata in `--flat-playlist` by [SuperSonicHub1](https://github.com/SuperSonicHub1)
|
||||||
|
* [extractor/telegraaf] Use mobile GraphQL API endpoint by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/tennistv] Fix timestamp by [zenerdi0de](https://github.com/zenerdi0de)
|
||||||
|
* [extractor/tiktok] Fix TikTokIE by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/triller] Fix auth token by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/trovo] Fix extractors by [Mehavoid](https://github.com/Mehavoid)
|
||||||
|
* [extractor/tv2] Support new URL format by [tobi1805](https://github.com/tobi1805)
|
||||||
|
* [extractor/web.archive:youtube] Fix `_YT_INITIAL_PLAYER_RESPONSE_RE`
|
||||||
|
* [extractor/wistia] Add support for channels by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/wistia] Match IDs in embed URLs by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/wordpress:playlist] Add generic embed extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/yandexvideopreview] Update `_VALID_URL` by [Grub4K](https://github.com/Grub4K)
|
||||||
|
* [extractor/zee5] Fix `_VALID_URL` by [m4tu4g](https://github.com/m4tu4g)
|
||||||
|
* [extractor/zee5] Generate device ids by [freezboltz](https://github.com/freezboltz)
|
||||||
|
|
||||||
|
|
||||||
|
### 2022.09.01
|
||||||
|
|
||||||
|
* Add option `--use-extractors`
|
||||||
|
* Merge youtube-dl: Upto [commit/ed5c44e](https://github.com/ytdl-org/youtube-dl/commit/ed5c44e7)
|
||||||
|
* Add yt-dlp version to infojson
|
||||||
|
* Fix `--break-per-url --max-downloads`
|
||||||
|
* Fix bug in `--alias`
|
||||||
|
* [cookies] Support firefox container in `--cookies-from-browser` by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [downloader/external] Smarter detection of executable
|
||||||
|
* [extractor/generic] Don't return JW player without formats
|
||||||
|
* [FormatSort] Fix `aext` for `--prefer-free-formats`
|
||||||
|
* [jsinterp] Various improvements by [pukkandan](https://github.com/pukkandan), [dirkf](https://github.com/dirkf), [elyse0](https://github.com/elyse0)
|
||||||
|
* [cache] Mechanism to invalidate old cache
|
||||||
|
* [utils] Add `deprecation_warning`
|
||||||
|
* [utils] Add `orderedSet_from_options`
|
||||||
|
* [utils] `Popen`: Restore `LD_LIBRARY_PATH` when using PyInstaller by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [build] `make tar` should not follow `DESTDIR` by [satan1st](https://github.com/satan1st)
|
||||||
|
* [build] Update pyinstaller by [shirt-dev](https://github.com/shirt-dev)
|
||||||
|
* [test] Fix `test_youtube_signature`
|
||||||
|
* [cleanup] Misc fixes and cleanup by [DavidH-2022](https://github.com/DavidH-2022), [MrRawes](https://github.com/MrRawes), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/epoch] Add extractor by [tejasa97](https://github.com/tejasa97)
|
||||||
|
* [extractor/eurosport] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/IslamChannel] Add extractors by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/newspicks] Add extractor by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/triller] Add extractor by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/VQQ] Add extractors by [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/youtube] Improvements to nsig extraction
|
||||||
|
* [extractor/youtube] Fix bug in format sorting
|
||||||
|
* [extractor/youtube] Update iOS Innertube clients by [SamantazFox](https://github.com/SamantazFox)
|
||||||
|
* [extractor/youtube] Use device-specific user agent by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/youtube] Add `--compat-option no-youtube-prefer-utc-upload-date` by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/arte] Bug fix by [cgrigis](https://github.com/cgrigis)
|
||||||
|
* [extractor/bilibili] Extract `flac` with premium account by [jackyyf](https://github.com/jackyyf)
|
||||||
|
* [extractor/BiliBiliSearch] Don't sort by date
|
||||||
|
* [extractor/BiliBiliSearch] Fix infinite loop
|
||||||
|
* [extractor/bitchute] Mark errors as expected
|
||||||
|
* [extractor/crunchyroll:beta] Use anonymous access by [tejing1](https://github.com/tejing1)
|
||||||
|
* [extractor/huya] Fix stream extraction by [ohaiibuzzle](https://github.com/ohaiibuzzle)
|
||||||
|
* [extractor/medaltv] Fix extraction by [xenova](https://github.com/xenova)
|
||||||
|
* [extractor/mediaset] Fix embed extraction
|
||||||
|
* [extractor/mixcloud] All formats are audio-only
|
||||||
|
* [extractor/rtbf] Fix jwt extraction by [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/screencastomatic] Support `--video-password` by [shreyasminocha](https://github.com/shreyasminocha)
|
||||||
|
* [extractor/stripchat] Don't modify input URL by [dfaker](https://github.com/dfaker)
|
||||||
|
* [extractor/uktv] Improve `_VALID_URL` by [dirkf](https://github.com/dirkf)
|
||||||
|
* [extractor/vimeo:user] Fix `_VALID_URL`
|
||||||
|
|
||||||
|
|
||||||
|
### 2022.08.19
|
||||||
|
|
||||||
|
* Fix bug in `--download-archive`
|
||||||
|
* [jsinterp] **Fix for new youtube players** and related improvements by [dirkf](https://github.com/dirkf), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [phantomjs] Add function to execute JS without a DOM by [MinePlayersPE](https://github.com/MinePlayersPE), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [build] Exclude devscripts from installs by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [cleanup] Misc fixes and cleanup
|
||||||
|
* [extractor/youtube] **Add fallback to phantomjs** for nsig
|
||||||
|
* [extractor/youtube] Fix error reporting of "Incomplete data"
|
||||||
|
* [extractor/youtube] Improve format sorting for iOS formats
|
||||||
|
* [extractor/youtube] Improve signature caching
|
||||||
|
* [extractor/instagram] Fix extraction by [bashonly](https://github.com/bashonly), [pritam20ps05](https://github.com/pritam20ps05)
|
||||||
|
* [extractor/rai] Minor fix by [nixxo](https://github.com/nixxo)
|
||||||
|
* [extractor/rtbf] Fix stream extractor by [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/SovietsCloset] Fix extractor by [ChillingPepper](https://github.com/ChillingPepper)
|
||||||
|
* [extractor/zattoo] Fix Zattoo resellers by [goggle](https://github.com/goggle)
|
||||||
|
|
||||||
|
### 2022.08.14
|
||||||
|
|
||||||
|
* Merge youtube-dl: Upto [commit/d231b56](https://github.com/ytdl-org/youtube-dl/commit/d231b56)
|
||||||
|
* [jsinterp] Handle **new youtube signature functions**
|
||||||
|
* [jsinterp] Truncate error messages
|
||||||
|
* [extractor] Fix format sorting of `channels`
|
||||||
|
* [ffmpeg] Disable avconv unless `--prefer-avconv`
|
||||||
|
* [ffmpeg] Smarter detection of ffprobe filename
|
||||||
|
* [embedthumbnail] Detect `libatomicparsley.so`
|
||||||
|
* [ThumbnailsConvertor] Fix conversion after `fixup_webp`
|
||||||
|
* [utils] Fix `get_compatible_ext`
|
||||||
|
* [build] Fix changelog
|
||||||
|
* [update] Set executable bit-mask by [pukkandan](https://github.com/pukkandan), [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [devscripts] Fix import
|
||||||
|
* [docs] Consistent use of `e.g.` by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [cleanup] Misc fixes and cleanup
|
||||||
|
* [extractor/moview] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/parler] Add extractor by [palewire](https://github.com/palewire)
|
||||||
|
* [extractor/patreon] Ignore erroneous media attachments by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/truth] Add extractor by [palewire](https://github.com/palewire)
|
||||||
|
* [extractor/aenetworks] Add formats parameter by [jacobtruman](https://github.com/jacobtruman)
|
||||||
|
* [extractor/crunchyroll] Improve `_VALID_URL`s
|
||||||
|
* [extractor/doodstream] Add `wf` domain by [aldoridhoni](https://github.com/aldoridhoni)
|
||||||
|
* [extractor/facebook] Add reel support by [bashonly](https://github.com/bashonly)
|
||||||
|
* [extractor/MLB] New extractor by [ischmidt20](https://github.com/ischmidt20)
|
||||||
|
* [extractor/rai] Misc fixes by [nixxo](https://github.com/nixxo)
|
||||||
|
* [extractor/toggo] Improve `_VALID_URL` by [masta79](https://github.com/masta79)
|
||||||
|
* [extractor/tubitv] Extract additional formats by [shirt-dev](https://github.com/shirt-dev)
|
||||||
|
* [extractor/zattoo] Potential fix for resellers
|
||||||
|
|
||||||
|
|
||||||
|
### 2022.08.08
|
||||||
|
|
||||||
|
* **Remove Python 3.6 support**
|
||||||
|
* Determine merge container better by [pukkandan](https://github.com/pukkandan), [selfisekai](https://github.com/selfisekai)
|
||||||
|
* Framework for embed detection by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* Merge youtube-dl: Upto [commit/adb5294](https://github.com/ytdl-org/youtube-dl/commit/adb5294)
|
||||||
|
* `--compat-option no-live-chat` should disable danmaku
|
||||||
|
* Fix misleading DRM message
|
||||||
|
* Import ctypes only when necessary
|
||||||
|
* Minor bugfixes
|
||||||
|
* Reject entire playlists faster with `--match-filter`
|
||||||
|
* Remove filtered entries from `-J`
|
||||||
|
* Standardize retry mechanism
|
||||||
|
* Validate `--merge-output-format`
|
||||||
|
* [downloader] Add average speed to final progress line
|
||||||
|
* [extractor] Add field `audio_channels`
|
||||||
|
* [extractor] Support multiple archive ids for one video
|
||||||
|
* [ffmpeg] Set `ffmpeg_location` in a contextvar
|
||||||
|
* [FFmpegThumbnailsConvertor] Fix conversion from GIF
|
||||||
|
* [MetadataParser] Don't set `None` when the field didn't match
|
||||||
|
* [outtmpl] Smarter replacing of unsupported characters
|
||||||
|
* [outtmpl] Treat empty values as None in filenames
|
||||||
|
* [utils] `sanitize_open`: Allow any IO stream as stdout
|
||||||
|
* [build, devscripts] Add devscript to set a build variant
|
||||||
|
* [build] Improve build process by [shirt-dev](https://github.com/shirt-dev)
|
||||||
|
* [build] Update pyinstaller
|
||||||
|
* [devscripts] Create `utils` and refactor
|
||||||
|
* [docs] Clarify `best*`
|
||||||
|
* [docs] Fix bug report issue template
|
||||||
|
* [docs] Fix capitalization in references by [christoph-heinrich](https://github.com/christoph-heinrich)
|
||||||
|
* [cleanup, mhtml] Use imghdr
|
||||||
|
* [cleanup, utils] Consolidate known media extensions
|
||||||
|
* [cleanup] Misc fixes and cleanup
|
||||||
|
* [extractor/angel] Add extractor by [AxiosDeminence](https://github.com/AxiosDeminence)
|
||||||
|
* [extractor/dplay] Add MotorTrend extractor by [Sipherdrakon](https://github.com/Sipherdrakon)
|
||||||
|
* [extractor/harpodeon] Add extractor by [eren-kemer](https://github.com/eren-kemer)
|
||||||
|
* [extractor/holodex] Add extractor by [pukkandan](https://github.com/pukkandan), [sqrtNOT](https://github.com/sqrtNOT)
|
||||||
|
* [extractor/kompas] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/rai] Add raisudtirol extractor by [nixxo](https://github.com/nixxo)
|
||||||
|
* [extractor/tempo] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/youtube] **Fixes for third party client detection** by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/youtube] Add `live_status=post_live` by [lazypete365](https://github.com/lazypete365)
|
||||||
|
* [extractor/youtube] Extract more format info
|
||||||
|
* [extractor/youtube] Parse translated subtitles only when requested
|
||||||
|
* [extractor/youtube, extractor/twitch] Allow waiting for channels to become live
|
||||||
|
* [extractor/youtube, webvtt] Extract auto-subs from livestream VODs by [fstirlitz](https://github.com/fstirlitz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/AbemaTVTitle] Implement paging by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/archiveorg] Improve handling of formats by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/arte] Fix title extraction
|
||||||
|
* [extractor/arte] **Move to v2 API** by [fstirlitz](https://github.com/fstirlitz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/bbc] Fix news articles by [ajj8](https://github.com/ajj8)
|
||||||
|
* [extractor/camtasia] Separate into own extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [extractor/cloudflarestream] Fix video_id padding by [haobinliang](https://github.com/haobinliang)
|
||||||
|
* [extractor/crunchyroll] Fix conversion of thumbnail from GIF
|
||||||
|
* [extractor/crunchyroll] Handle missing metadata correctly by [Burve](https://github.com/Burve), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/crunchyroll:beta] Extract timestamp and fix tests by [tejing1](https://github.com/tejing1)
|
||||||
|
* [extractor/crunchyroll:beta] Use streams API by [tejing1](https://github.com/tejing1)
|
||||||
|
* [extractor/doodstream] Support more domains by [Galiley](https://github.com/Galiley)
|
||||||
|
* [extractor/ESPN] Extract duration by [ischmidt20](https://github.com/ischmidt20)
|
||||||
|
* [extractor/FIFA] Change API endpoint by [Bricio](https://github.com/Bricio), [yashkc2025](https://github.com/yashkc2025)
|
||||||
|
* [extractor/globo:article] Remove false positives by [Bricio](https://github.com/Bricio)
|
||||||
|
* [extractor/Go] Extract timestamp by [ischmidt20](https://github.com/ischmidt20)
|
||||||
|
* [extractor/hidive] Fix cookie login when netrc is also given by [winterbird-code](https://github.com/winterbird-code)
|
||||||
|
* [extractor/html5] Separate into own extractor by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/ina] Improve extractor by [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/NaverNow] Change endpoint by [ping](https://github.com/ping)
|
||||||
|
* [extractor/ninegag] Extract uploader by [DjesonPV](https://github.com/DjesonPV)
|
||||||
|
* [extractor/NovaPlay] Fix extractor by [Bojidarist](https://github.com/Bojidarist)
|
||||||
|
* [extractor/orf:radio] Rewrite extractors
|
||||||
|
* [extractor/patreon] Fix and improve extractors by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [extractor/rai] Fix RaiNews extraction by [nixxo](https://github.com/nixxo)
|
||||||
|
* [extractor/redbee] Unify and update extractors by [elyse0](https://github.com/elyse0)
|
||||||
|
* [extractor/stripchat] Fix `_VALID_URL` by [freezboltz](https://github.com/freezboltz)
|
||||||
|
* [extractor/tubi] Exclude playlists from playlist entries by [sqrtNOT](https://github.com/sqrtNOT)
|
||||||
|
* [extractor/tviplayer] Improve `_VALID_URL` by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/twitch] Extract chapters for single chapter VODs by [mpeter50](https://github.com/mpeter50)
|
||||||
|
* [extractor/vgtv] Support tv.vg.no by [sqrtNOT](https://github.com/sqrtNOT)
|
||||||
|
* [extractor/vidio] Support embed link by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
* [extractor/vk] Fix extractor by [Mehavoid](https://github.com/Mehavoid)
|
||||||
|
* [extractor/WASDTV:record] Fix `_VALID_URL`
|
||||||
|
* [extractor/xfileshare] Add Referer by [Galiley](https://github.com/Galiley)
|
||||||
|
* [extractor/YahooJapanNews] Fix extractor by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
|
* [extractor/yandexmusic] Extract higher quality format
|
||||||
|
* [extractor/zee5] Update Device ID by [m4tu4g](https://github.com/m4tu4g)
|
||||||
|
|
||||||
|
|
||||||
### 2022.07.18
|
### 2022.07.18
|
||||||
|
|
||||||
* Allow users to specify encoding in each config file by [Lesmiscore](https://github.com/Lesmiscore)
|
* Allow users to specify encoding in each config file by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
@@ -125,7 +662,7 @@
|
|||||||
|
|
||||||
* [**Deprecate support for Python 3.6**](https://github.com/yt-dlp/yt-dlp/issues/3764#issuecomment-1154051119)
|
* [**Deprecate support for Python 3.6**](https://github.com/yt-dlp/yt-dlp/issues/3764#issuecomment-1154051119)
|
||||||
* **Add option `--download-sections` to download video partially**
|
* **Add option `--download-sections` to download video partially**
|
||||||
* Chapter regex and time ranges are accepted (Eg: `--download-sections *1:10-2:20`)
|
* Chapter regex and time ranges are accepted, e.g. `--download-sections *1:10-2:20`
|
||||||
* Add option `--alias`
|
* Add option `--alias`
|
||||||
* Add option `--lazy-playlist` to process entries as they are received
|
* Add option `--lazy-playlist` to process entries as they are received
|
||||||
* Add option `--retry-sleep`
|
* Add option `--retry-sleep`
|
||||||
@@ -1289,7 +1826,7 @@
|
|||||||
|
|
||||||
* Add new option `--netrc-location`
|
* Add new option `--netrc-location`
|
||||||
* [outtmpl] Allow alternate fields using `,`
|
* [outtmpl] Allow alternate fields using `,`
|
||||||
* [outtmpl] Add format type `B` to treat the value as bytes (eg: to limit the filename to a certain number of bytes)
|
* [outtmpl] Add format type `B` to treat the value as bytes, e.g. to limit the filename to a certain number of bytes
|
||||||
* Separate the options `--ignore-errors` and `--no-abort-on-error`
|
* Separate the options `--ignore-errors` and `--no-abort-on-error`
|
||||||
* Basic framework for simultaneous download of multiple formats by [nao20010128nao](https://github.com/nao20010128nao)
|
* Basic framework for simultaneous download of multiple formats by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
* [17live] Add 17.live extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
* [17live] Add 17.live extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
@@ -1679,7 +2216,7 @@
|
|||||||
|
|
||||||
* Merge youtube-dl: Upto [commit/a803582](https://github.com/ytdl-org/youtube-dl/commit/a8035827177d6b59aca03bd717acb6a9bdd75ada)
|
* Merge youtube-dl: Upto [commit/a803582](https://github.com/ytdl-org/youtube-dl/commit/a8035827177d6b59aca03bd717acb6a9bdd75ada)
|
||||||
* Add `--extractor-args` to pass some extractor-specific arguments. See [readme](https://github.com/yt-dlp/yt-dlp#extractor-arguments)
|
* Add `--extractor-args` to pass some extractor-specific arguments. See [readme](https://github.com/yt-dlp/yt-dlp#extractor-arguments)
|
||||||
* Add extractor option `skip` for `youtube`. Eg: `--extractor-args youtube:skip=hls,dash`
|
* Add extractor option `skip` for `youtube`, e.g. `--extractor-args youtube:skip=hls,dash`
|
||||||
* Deprecates `--youtube-skip-dash-manifest`, `--youtube-skip-hls-manifest`, `--youtube-include-dash-manifest`, `--youtube-include-hls-manifest`
|
* Deprecates `--youtube-skip-dash-manifest`, `--youtube-skip-hls-manifest`, `--youtube-include-dash-manifest`, `--youtube-include-hls-manifest`
|
||||||
* Allow `--list...` options to work with `--print`, `--quiet` and other `--list...` options
|
* Allow `--list...` options to work with `--print`, `--quiet` and other `--list...` options
|
||||||
* [youtube] Use `player` API for additional video extraction requests by [coletdjnz](https://github.com/coletdjnz)
|
* [youtube] Use `player` API for additional video extraction requests by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
|||||||
@@ -28,12 +28,12 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
|||||||
[](https://github.com/sponsors/coletdjnz)
|
[](https://github.com/sponsors/coletdjnz)
|
||||||
|
|
||||||
* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
|
* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
|
||||||
* Added support for downloading YoutubeWebArchive videos
|
* Added support for new websites YoutubeWebArchive, MainStreaming, PRX, nzherald, Mediaklikk, StarTV etc
|
||||||
* Added support for new websites MainStreaming, PRX, nzherald, etc
|
* Improved/fixed support for Patreon, panopto, gfycat, itv, pbs, SouthParkDE etc
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## [Ashish0804](https://github.com/Ashish0804)
|
## [Ashish0804](https://github.com/Ashish0804) <sub><sup>[Inactive]</sup></sub>
|
||||||
|
|
||||||
[](https://ko-fi.com/ashish0804)
|
[](https://ko-fi.com/ashish0804)
|
||||||
|
|
||||||
@@ -42,10 +42,18 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
|||||||
* Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc
|
* Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc
|
||||||
|
|
||||||
|
|
||||||
## [Lesmiscore](https://github.com/Lesmiscore) (nao20010128nao)
|
## [Lesmiscore](https://github.com/Lesmiscore) <sup><sub>(nao20010128nao)</sup></sub>
|
||||||
|
|
||||||
**Bitcoin**: bc1qfd02r007cutfdjwjmyy9w23rjvtls6ncve7r3s
|
**Bitcoin**: bc1qfd02r007cutfdjwjmyy9w23rjvtls6ncve7r3s
|
||||||
**Monacoin**: mona1q3tf7dzvshrhfe3md379xtvt2n22duhglv5dskr
|
**Monacoin**: mona1q3tf7dzvshrhfe3md379xtvt2n22duhglv5dskr
|
||||||
|
|
||||||
* Download live from start to end for YouTube
|
* Download live from start to end for YouTube
|
||||||
* Added support for new websites mildom, PixivSketch, skeb, radiko, voicy, mirrativ, openrec, whowatch, damtomo, 17.live, mixch etc
|
* Added support for new websites AbemaTV, mildom, PixivSketch, skeb, radiko, voicy, mirrativ, openrec, whowatch, damtomo, 17.live, mixch etc
|
||||||
|
* Improved/fixed support for fc2, YahooJapanNews, tver, iwara etc
|
||||||
|
|
||||||
|
|
||||||
|
## [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
* `--cookies-from-browser` support for Firefox containers
|
||||||
|
* Added support for new websites Genius, Kick, NBCStations, Triller, VideoKen etc
|
||||||
|
* Improved/fixed support for Anvato, Brightcove, Instagram, ParamountPlus, Reddit, SlidesLive, TikTok, Twitter, Vimeo etc
|
||||||
|
|||||||
14
Makefile
@@ -17,8 +17,8 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
|
|||||||
clean-test:
|
clean-test:
|
||||||
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
|
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
|
||||||
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
|
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
|
||||||
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.jpeg *.jpg *.m4a *.mpga *.m4v *.mhtml *.mkv *.mov \
|
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
|
||||||
*.mp3 *.mp4 *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
|
*.mp4 *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
|
||||||
clean-dist:
|
clean-dist:
|
||||||
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
|
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
|
||||||
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
|
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
|
||||||
@@ -33,7 +33,6 @@ completion-zsh: completions/zsh/_yt-dlp
|
|||||||
lazy-extractors: yt_dlp/extractor/lazy_extractors.py
|
lazy-extractors: yt_dlp/extractor/lazy_extractors.py
|
||||||
|
|
||||||
PREFIX ?= /usr/local
|
PREFIX ?= /usr/local
|
||||||
DESTDIR ?= .
|
|
||||||
BINDIR ?= $(PREFIX)/bin
|
BINDIR ?= $(PREFIX)/bin
|
||||||
MANDIR ?= $(PREFIX)/man
|
MANDIR ?= $(PREFIX)/man
|
||||||
SHAREDIR ?= $(PREFIX)/share
|
SHAREDIR ?= $(PREFIX)/share
|
||||||
@@ -75,17 +74,16 @@ offlinetest: codetest
|
|||||||
$(PYTHON) -m pytest -k "not download"
|
$(PYTHON) -m pytest -k "not download"
|
||||||
|
|
||||||
# XXX: This is hard to maintain
|
# XXX: This is hard to maintain
|
||||||
CODE_FOLDERS = yt_dlp yt_dlp/downloader yt_dlp/extractor yt_dlp/postprocessor yt_dlp/compat \
|
CODE_FOLDERS = yt_dlp yt_dlp/downloader yt_dlp/extractor yt_dlp/postprocessor yt_dlp/compat
|
||||||
yt_dlp/extractor/anvato_token_generator
|
|
||||||
yt-dlp: yt_dlp/*.py yt_dlp/*/*.py
|
yt-dlp: yt_dlp/*.py yt_dlp/*/*.py
|
||||||
mkdir -p zip
|
mkdir -p zip
|
||||||
for d in $(CODE_FOLDERS) ; do \
|
for d in $(CODE_FOLDERS) ; do \
|
||||||
mkdir -p zip/$$d ;\
|
mkdir -p zip/$$d ;\
|
||||||
cp -pPR $$d/*.py zip/$$d/ ;\
|
cp -pPR $$d/*.py zip/$$d/ ;\
|
||||||
done
|
done
|
||||||
touch -t 200001010101 zip/yt_dlp/*.py zip/yt_dlp/*/*.py zip/yt_dlp/*/*/*.py
|
touch -t 200001010101 zip/yt_dlp/*.py zip/yt_dlp/*/*.py
|
||||||
mv zip/yt_dlp/__main__.py zip/
|
mv zip/yt_dlp/__main__.py zip/
|
||||||
cd zip ; zip -q ../yt-dlp yt_dlp/*.py yt_dlp/*/*.py yt_dlp/*/*/*.py __main__.py
|
cd zip ; zip -q ../yt-dlp yt_dlp/*.py yt_dlp/*/*.py __main__.py
|
||||||
rm -rf zip
|
rm -rf zip
|
||||||
echo '#!$(PYTHON)' > yt-dlp
|
echo '#!$(PYTHON)' > yt-dlp
|
||||||
cat yt-dlp.zip >> yt-dlp
|
cat yt-dlp.zip >> yt-dlp
|
||||||
@@ -134,7 +132,7 @@ yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscrip
|
|||||||
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
||||||
|
|
||||||
yt-dlp.tar.gz: all
|
yt-dlp.tar.gz: all
|
||||||
@tar -czf $(DESTDIR)/yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
|
@tar -czf yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
--exclude '*.kate-swp' \
|
--exclude '*.kate-swp' \
|
||||||
--exclude '*.pyc' \
|
--exclude '*.pyc' \
|
||||||
|
|||||||
1
devscripts/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Empty file needed to make devscripts.utils properly importable from outside
|
||||||
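The `devscripts.utils` module that this empty `__init__.py` makes importable is not itself shown in this diff. Below is a minimal sketch of what it plausibly provides, inferred only from the call sites in the later hunks (`read_file`, `write_file`, `read_version`, `get_filename_args`, `compose_functions`); the function bodies are assumptions, not the actual implementation.

```python
# Hypothetical sketch of devscripts/utils.py (assumed; not part of this diff)
import argparse
import functools


def read_file(fname):
    with open(fname, encoding='utf-8') as f:
        return f.read()


def write_file(fname, content):
    with open(fname, 'w', encoding='utf-8') as f:
        return f.write(content)


def read_version(fname='yt_dlp/version.py'):
    # Same trick as the removed per-script code: run version.py and pull out __version__
    exec(compile(read_file(fname), fname, 'exec'))
    return locals()['__version__']


def get_filename_args(has_infile=False, default_outfile=None):
    # Replaces the repeated optparse boilerplate in the individual devscripts
    parser = argparse.ArgumentParser()
    if has_infile:
        parser.add_argument('infile', help='Input file')
    kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
    parser.add_argument('outfile', **kwargs, help='Output file')
    opts = parser.parse_args()
    if has_infile:
        return opts.infile, opts.outfile
    return opts.outfile


def compose_functions(*functions):
    # compose_functions(f, g)(x) == g(f(x)); used by prepare_manpage.py and set-variant.py
    return lambda x: functools.reduce(lambda y, f: f(y), functions, x)
```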
@@ -9,14 +9,19 @@ from ..utils import (
|
|||||||
write_string,
|
write_string,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# These bloat the lazy_extractors, so allow them to passthrough silently
|
||||||
|
ALLOWED_CLASSMETHODS = {'extract_from_webpage', 'get_testcases', 'get_webpage_testcases'}
|
||||||
|
_WARNED = False
|
||||||
|
|
||||||
|
|
||||||
class LazyLoadMetaClass(type):
|
class LazyLoadMetaClass(type):
|
||||||
def __getattr__(cls, name):
|
def __getattr__(cls, name):
|
||||||
# "_TESTS" bloat the lazy_extractors
|
global _WARNED
|
||||||
if '_real_class' not in cls.__dict__ and name != 'get_testcases':
|
if ('_real_class' not in cls.__dict__
|
||||||
write_string(
|
and name not in ALLOWED_CLASSMETHODS and not _WARNED):
|
||||||
'WARNING: Falling back to normal extractor since lazy extractor '
|
_WARNED = True
|
||||||
f'{cls.__name__} does not have attribute {name}{bug_reports_message()}\n')
|
write_string('WARNING: Falling back to normal extractor since lazy extractor '
|
||||||
|
f'{cls.__name__} does not have attribute {name}{bug_reports_message()}\n')
|
||||||
return getattr(cls.real_class, name)
|
return getattr(cls.real_class, name)
|
||||||
|
|
||||||
|
|
||||||
|
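The warning above fires when an attribute that was not copied into the generated stub forces the real extractor to be loaded. The `real_class` machinery lives in `devscripts/lazy_load_template.py`, which is outside this hunk, so the standalone sketch below of the lazy-loading idea is an assumption rather than the actual template.

```python
import importlib


class LazyLoadMetaClass(type):
    def __getattr__(cls, name):
        # Attribute not present on the stub: fall back to the real extractor class
        return getattr(cls.real_class, name)

    @property
    def real_class(cls):
        # Import the real module only on first access and cache the class on the stub
        if '_real_class' not in cls.__dict__:
            cls._real_class = getattr(importlib.import_module(cls._module), cls.__name__)
        return cls._real_class


class LazyLoadExtractor(metaclass=LazyLoadMetaClass):
    _module = None


# A generated stub (cf. IE_TEMPLATE in the make_lazy_extractors.py hunk below) carries only
# the module path plus the static attributes the generator copies; everything else is
# resolved through real_class on demand. The class name here is purely illustrative.
class HypotheticalIE(LazyLoadExtractor):
    _module = 'yt_dlp.extractor.hypothetical'
    _VALID_URL = r'https?://example\.invalid/\d+'
```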
|||||||
@@ -7,20 +7,14 @@ import sys
|
|||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
|
||||||
import optparse
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
from devscripts.utils import (
|
||||||
def read(fname):
|
get_filename_args,
|
||||||
with open(fname, encoding='utf-8') as f:
|
read_file,
|
||||||
return f.read()
|
read_version,
|
||||||
|
write_file,
|
||||||
|
)
|
||||||
# Get the version without importing the package
|
|
||||||
def read_version(fname):
|
|
||||||
exec(compile(read(fname), fname, 'exec'))
|
|
||||||
return locals()['__version__']
|
|
||||||
|
|
||||||
|
|
||||||
VERBOSE_TMPL = '''
|
VERBOSE_TMPL = '''
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
@@ -58,20 +52,24 @@ VERBOSE_TMPL = '''
|
|||||||
required: true
|
required: true
|
||||||
'''.strip()
|
'''.strip()
|
||||||
|
|
||||||
|
NO_SKIP = '''
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
|
||||||
|
description: Fill all fields even if you think it is irrelevant for the issue
|
||||||
|
options:
|
||||||
|
- label: I understand that I will be **blocked** if I remove or skip any mandatory\\* field
|
||||||
|
required: true
|
||||||
|
'''.strip()
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
fields = {'version': read_version(), 'no_skip': NO_SKIP}
|
||||||
_, args = parser.parse_args()
|
|
||||||
if len(args) != 2:
|
|
||||||
parser.error('Expected an input and an output filename')
|
|
||||||
|
|
||||||
fields = {'version': read_version('yt_dlp/version.py')}
|
|
||||||
fields['verbose'] = VERBOSE_TMPL % fields
|
fields['verbose'] = VERBOSE_TMPL % fields
|
||||||
fields['verbose_optional'] = re.sub(r'(\n\s+validations:)?\n\s+required: true', '', fields['verbose'])
|
fields['verbose_optional'] = re.sub(r'(\n\s+validations:)?\n\s+required: true', '', fields['verbose'])
|
||||||
|
|
||||||
infile, outfile = args
|
infile, outfile = get_filename_args(has_infile=True)
|
||||||
with open(outfile, 'w', encoding='utf-8') as outf:
|
write_file(outfile, read_file(infile) % fields)
|
||||||
outf.write(read(infile) % fields)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
@@ -2,41 +2,50 @@
|
|||||||
|
|
||||||
# Allow direct execution
|
# Allow direct execution
|
||||||
import os
|
import os
|
||||||
|
import shutil
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
|
||||||
import optparse
|
|
||||||
from inspect import getsource
|
from inspect import getsource
|
||||||
|
|
||||||
|
from devscripts.utils import get_filename_args, read_file, write_file
|
||||||
|
|
||||||
NO_ATTR = object()
|
NO_ATTR = object()
|
||||||
STATIC_CLASS_PROPERTIES = ['IE_NAME', 'IE_DESC', 'SEARCH_KEY', '_WORKING', '_NETRC_MACHINE', 'age_limit']
|
STATIC_CLASS_PROPERTIES = [
|
||||||
|
'IE_NAME', '_ENABLED', '_VALID_URL', # Used for URL matching
|
||||||
|
'_WORKING', 'IE_DESC', '_NETRC_MACHINE', 'SEARCH_KEY', # Used for --extractor-descriptions
|
||||||
|
'age_limit', # Used for --age-limit (evaluated)
|
||||||
|
'_RETURN_TYPE', # Accessed in CLI only with instance (evaluated)
|
||||||
|
]
|
||||||
CLASS_METHODS = [
|
CLASS_METHODS = [
|
||||||
'ie_key', 'working', 'description', 'suitable', '_match_valid_url', '_match_id', 'get_temp_id', 'is_suitable'
|
'ie_key', 'suitable', '_match_valid_url', # Used for URL matching
|
||||||
|
'working', 'get_temp_id', '_match_id', # Accessed just before instance creation
|
||||||
|
'description', # Used for --extractor-descriptions
|
||||||
|
'is_suitable', # Used for --age-limit
|
||||||
|
'supports_login', 'is_single_video', # Accessed in CLI only with instance
|
||||||
]
|
]
|
||||||
IE_TEMPLATE = '''
|
IE_TEMPLATE = '''
|
||||||
class {name}({bases}):
|
class {name}({bases}):
|
||||||
_module = {module!r}
|
_module = {module!r}
|
||||||
'''
|
'''
|
||||||
with open('devscripts/lazy_load_template.py', encoding='utf-8') as f:
|
MODULE_TEMPLATE = read_file('devscripts/lazy_load_template.py')
|
||||||
MODULE_TEMPLATE = f.read()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = optparse.OptionParser(usage='%prog [OUTFILE.py]')
|
lazy_extractors_filename = get_filename_args(default_outfile='yt_dlp/extractor/lazy_extractors.py')
|
||||||
args = parser.parse_args()[1] or ['yt_dlp/extractor/lazy_extractors.py']
|
|
||||||
if len(args) != 1:
|
|
||||||
parser.error('Expected only an output filename')
|
|
||||||
|
|
||||||
lazy_extractors_filename = args[0]
|
|
||||||
if os.path.exists(lazy_extractors_filename):
|
if os.path.exists(lazy_extractors_filename):
|
||||||
os.remove(lazy_extractors_filename)
|
os.remove(lazy_extractors_filename)
|
||||||
|
|
||||||
_ALL_CLASSES = get_all_ies() # Must be before import
|
_ALL_CLASSES = get_all_ies() # Must be before import
|
||||||
|
|
||||||
|
import yt_dlp.plugins
|
||||||
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
|
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
|
||||||
|
|
||||||
|
# Filter out plugins
|
||||||
|
_ALL_CLASSES = [cls for cls in _ALL_CLASSES if not cls.__module__.startswith(f'{yt_dlp.plugins.PACKAGE_NAME}.')]
|
||||||
|
|
||||||
DummyInfoExtractor = type('InfoExtractor', (InfoExtractor,), {'IE_NAME': NO_ATTR})
|
DummyInfoExtractor = type('InfoExtractor', (InfoExtractor,), {'IE_NAME': NO_ATTR})
|
||||||
module_src = '\n'.join((
|
module_src = '\n'.join((
|
||||||
MODULE_TEMPLATE,
|
MODULE_TEMPLATE,
|
||||||
@@ -46,20 +55,20 @@ def main():
|
|||||||
*build_ies(_ALL_CLASSES, (InfoExtractor, SearchInfoExtractor), DummyInfoExtractor),
|
*build_ies(_ALL_CLASSES, (InfoExtractor, SearchInfoExtractor), DummyInfoExtractor),
|
||||||
))
|
))
|
||||||
|
|
||||||
with open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
|
write_file(lazy_extractors_filename, f'{module_src}\n')
|
||||||
f.write(f'{module_src}\n')
|
|
||||||
|
|
||||||
|
|
||||||
def get_all_ies():
|
def get_all_ies():
|
||||||
PLUGINS_DIRNAME = 'ytdlp_plugins'
|
PLUGINS_DIRNAME = 'ytdlp_plugins'
|
||||||
BLOCKED_DIRNAME = f'{PLUGINS_DIRNAME}_blocked'
|
BLOCKED_DIRNAME = f'{PLUGINS_DIRNAME}_blocked'
|
||||||
if os.path.exists(PLUGINS_DIRNAME):
|
if os.path.exists(PLUGINS_DIRNAME):
|
||||||
os.rename(PLUGINS_DIRNAME, BLOCKED_DIRNAME)
|
# os.rename cannot be used, e.g. in Docker. See https://github.com/yt-dlp/yt-dlp/pull/4958
|
||||||
|
shutil.move(PLUGINS_DIRNAME, BLOCKED_DIRNAME)
|
||||||
try:
|
try:
|
||||||
from yt_dlp.extractor.extractors import _ALL_CLASSES
|
from yt_dlp.extractor.extractors import _ALL_CLASSES
|
||||||
finally:
|
finally:
|
||||||
if os.path.exists(BLOCKED_DIRNAME):
|
if os.path.exists(BLOCKED_DIRNAME):
|
||||||
os.rename(BLOCKED_DIRNAME, PLUGINS_DIRNAME)
|
shutil.move(BLOCKED_DIRNAME, PLUGINS_DIRNAME)
|
||||||
return _ALL_CLASSES
|
return _ALL_CLASSES
|
||||||
|
|
||||||
|
|
||||||
@@ -116,11 +125,6 @@ def build_lazy_ie(ie, name, attr_base):
|
|||||||
}.get(base.__name__, base.__name__) for base in ie.__bases__)
|
}.get(base.__name__, base.__name__) for base in ie.__bases__)
|
||||||
|
|
||||||
s = IE_TEMPLATE.format(name=name, module=ie.__module__, bases=bases)
|
s = IE_TEMPLATE.format(name=name, module=ie.__module__, bases=bases)
|
||||||
valid_url = getattr(ie, '_VALID_URL', None)
|
|
||||||
if not valid_url and hasattr(ie, '_make_valid_url'):
|
|
||||||
valid_url = ie._make_valid_url()
|
|
||||||
if valid_url:
|
|
||||||
s += f' _VALID_URL = {valid_url!r}\n'
|
|
||||||
return s + '\n'.join(extra_ie_code(ie, attr_base))
|
return s + '\n'.join(extra_ie_code(ie, attr_base))
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,17 @@ yt-dlp --help | make_readme.py
|
|||||||
This must be run in a console of correct width
|
This must be run in a console of correct width
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
|
||||||
import functools
|
import functools
|
||||||
import re
|
import re
|
||||||
import sys
|
|
||||||
|
from devscripts.utils import read_file, write_file
|
||||||
|
|
||||||
README_FILE = 'README.md'
|
README_FILE = 'README.md'
|
||||||
|
|
||||||
@@ -38,6 +45,10 @@ switch_col_width = len(re.search(r'(?m)^\s{5,}', options).group())
|
|||||||
delim = f'\n{" " * switch_col_width}'
|
delim = f'\n{" " * switch_col_width}'
|
||||||
|
|
||||||
PATCHES = (
|
PATCHES = (
|
||||||
|
( # Standardize update message
|
||||||
|
r'(?m)^( -U, --update\s+).+(\n \s.+)*$',
|
||||||
|
r'\1Update this program to the latest version',
|
||||||
|
),
|
||||||
( # Headings
|
( # Headings
|
||||||
r'(?m)^ (\w.+\n)( (?=\w))?',
|
r'(?m)^ (\w.+\n)( (?=\w))?',
|
||||||
r'## \1'
|
r'## \1'
|
||||||
@@ -63,12 +74,10 @@ PATCHES = (
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
with open(README_FILE, encoding='utf-8') as f:
|
readme = read_file(README_FILE)
|
||||||
readme = f.read()
|
|
||||||
|
|
||||||
with open(README_FILE, 'w', encoding='utf-8') as f:
|
write_file(README_FILE, ''.join((
|
||||||
f.write(''.join((
|
take_section(readme, end=f'## {OPTIONS_START}'),
|
||||||
take_section(readme, end=f'## {OPTIONS_START}'),
|
functools.reduce(apply_patch, PATCHES, options),
|
||||||
functools.reduce(apply_patch, PATCHES, options),
|
take_section(readme, f'# {OPTIONS_END}'),
|
||||||
take_section(readme, f'# {OPTIONS_END}'),
|
)))
|
||||||
)))
|
|
||||||
|
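For context on the hunk above: each `PATCHES` entry is a `(pattern, replacement)` pair, and `functools.reduce(apply_patch, PATCHES, options)` applies them to the `--help` text in order. `apply_patch` itself is not shown here, so the helper and the patch data below are illustrative guesses rather than the real code.

```python
import functools
import re

# Illustrative (pattern, replacement) pairs in the same shape as PATCHES
DEMO_PATCHES = (
    (r'(?m)^(  -U, --update\s+).+$', r'\1Update this program to the latest version'),
    (r'(?m)^(\w.+)$', r'## \1'),  # promote top-level lines to headings
)


def apply_patch(text, patch):
    # Assumed helper: unpack one (pattern, replacement) pair into re.sub
    return re.sub(*patch, text)


demo_options = 'General Options\n  -U, --update     Update this program (see wiki)\n'
print(functools.reduce(apply_patch, DEMO_PATCHES, demo_options))
```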
|||||||
@@ -7,21 +7,13 @@ import sys
|
|||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
|
||||||
import optparse
|
from devscripts.utils import get_filename_args, write_file
|
||||||
|
|
||||||
from yt_dlp.extractor import list_extractor_classes
|
from yt_dlp.extractor import list_extractor_classes
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
|
|
||||||
_, args = parser.parse_args()
|
|
||||||
if len(args) != 1:
|
|
||||||
parser.error('Expected an output filename')
|
|
||||||
|
|
||||||
out = '\n'.join(ie.description() for ie in list_extractor_classes() if ie.IE_DESC is not False)
|
out = '\n'.join(ie.description() for ie in list_extractor_classes() if ie.IE_DESC is not False)
|
||||||
|
write_file(get_filename_args(), f'# Supported sites\n{out}\n')
|
||||||
with open(args[0], 'w', encoding='utf-8') as outf:
|
|
||||||
outf.write(f'# Supported sites\n{out}\n')
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
**devscripts/prepare_manpage.py**

```diff
@@ -1,9 +1,22 @@
 #!/usr/bin/env python3

-import optparse
+# Allow direct execution
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
 import os.path
 import re

+from devscripts.utils import (
+    compose_functions,
+    get_filename_args,
+    read_file,
+    write_file,
+)
+
 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 README_FILE = os.path.join(ROOT_DIR, 'README.md')

@@ -22,25 +35,6 @@ yt\-dlp \- A youtube-dl fork with additional features and patches
 '''


-def main():
-    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
-    _, args = parser.parse_args()
-    if len(args) != 1:
-        parser.error('Expected an output filename')
-
-    outfile, = args
-
-    with open(README_FILE, encoding='utf-8') as f:
-        readme = f.read()
-
-    readme = filter_excluded_sections(readme)
-    readme = move_sections(readme)
-    readme = filter_options(readme)
-
-    with open(outfile, 'w', encoding='utf-8') as outf:
-        outf.write(PREFIX + readme)
-
-
 def filter_excluded_sections(readme):
     EXCLUDED_SECTION_BEGIN_STRING = re.escape('<!-- MANPAGE: BEGIN EXCLUDED SECTION -->')
     EXCLUDED_SECTION_END_STRING = re.escape('<!-- MANPAGE: END EXCLUDED SECTION -->')
@@ -92,5 +86,12 @@ def filter_options(readme):
     return readme.replace(section, options, 1)


+TRANSFORM = compose_functions(filter_excluded_sections, move_sections, filter_options)
+
+
+def main():
+    write_file(get_filename_args(), PREFIX + TRANSFORM(read_file(README_FILE)))
+
+
 if __name__ == '__main__':
     main()
```
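The `filter_excluded_sections` step chained into `TRANSFORM` cuts everything between the two manpage markers. A toy version of that behaviour, assuming the same `(?s)` regex approach as the script (the exact pattern in the repo may differ):

```python
import re

readme = (
    'kept intro\n'
    '<!-- MANPAGE: BEGIN EXCLUDED SECTION -->\n'
    'dropped in the manpage\n'
    '<!-- MANPAGE: END EXCLUDED SECTION -->\n'
    'kept outro\n'
)

begin = re.escape('<!-- MANPAGE: BEGIN EXCLUDED SECTION -->')
end = re.escape('<!-- MANPAGE: END EXCLUDED SECTION -->')
# (?s) lets . span newlines, so the whole marked block is removed at once
print(re.sub(rf'(?s){begin}.+?{end}\n', '', readme))
# kept intro
# kept outro
```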
**devscripts/run_tests.sh** (every use of `$1` is now quoted, so the script no longer errors out in the `[ ... ]` tests when it is invoked without an argument)

```diff
@@ -1,13 +1,13 @@
 #!/usr/bin/env sh

-if [ -z $1 ]; then
+if [ -z "$1" ]; then
     test_set='test'
-elif [ $1 = 'core' ]; then
+elif [ "$1" = 'core' ]; then
     test_set="-m not download"
-elif [ $1 = 'download' ]; then
+elif [ "$1" = 'download' ]; then
     test_set="-m download"
 else
-    echo 'Invalid test type "'$1'". Use "core" | "download"'
+    echo 'Invalid test type "'"$1"'". Use "core" | "download"'
     exit 1
 fi
```
**devscripts/set-variant.py** (new file, 36 lines)

```python
#!/usr/bin/env python3

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import argparse
import functools
import re

from devscripts.utils import compose_functions, read_file, write_file

VERSION_FILE = 'yt_dlp/version.py'


def parse_options():
    parser = argparse.ArgumentParser(description='Set the build variant of the package')
    parser.add_argument('variant', help='Name of the variant')
    parser.add_argument('-M', '--update-message', default=None, help='Message to show in -U')
    return parser.parse_args()


def property_setter(name, value):
    return functools.partial(re.sub, rf'(?m)^{name}\s*=\s*.+$', f'{name} = {value!r}')


opts = parse_options()
transform = compose_functions(
    property_setter('VARIANT', opts.variant),
    property_setter('UPDATE_HINT', opts.update_message)
)

write_file(VERSION_FILE, transform(read_file(VERSION_FILE)))
```
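`property_setter` is the heart of the script: `functools.partial` freezes the regex and replacement so each property edit becomes a plain text-to-text function, ready to be chained by `compose_functions`. A quick demonstration on a made-up `version.py` body (the variant value here is hypothetical):

```python
import functools
import re


def property_setter(name, value):
    return functools.partial(re.sub, rf'(?m)^{name}\s*=\s*.+$', f'{name} = {value!r}')


src = "VARIANT = None\nUPDATE_HINT = None\n"
set_variant = property_setter('VARIANT', 'pip')   # fabricated variant name
print(set_variant(src))
# VARIANT = 'pip'
# UPDATE_HINT = None
```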
**devscripts/update-formulae.py**

```diff
@@ -1,5 +1,10 @@
 #!/usr/bin/env python3

+"""
+Usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
+version can be either 0-aligned (yt-dlp version) or normalized (PyPi version)
+"""
+
 # Allow direct execution
 import os
 import sys
@@ -11,8 +16,7 @@ import json
 import re
 import urllib.request

-# usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
-# version can be either 0-aligned (yt-dlp version) or normalized (PyPl version)
+from devscripts.utils import read_file, write_file

 filename, version = sys.argv[1:]

@@ -27,11 +31,9 @@ tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.
 sha256sum = tarball_file['digests']['sha256']
 url = tarball_file['url']

-with open(filename) as r:
-    formulae_text = r.read()
+formulae_text = read_file(filename)

 formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text, count=1)
 formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text, count=1)

-with open(filename, 'w') as w:
-    w.write(formulae_text)
+write_file(filename, formulae_text)
```
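Upstream of this excerpt, the script fetches the release metadata from PyPI and picks the sdist out of `pypi_release['urls']`, exactly as the hunk header shows. Sketched with a stubbed-out response (the dict below is fabricated; the real script downloads it with `urllib.request`):

```python
# Fabricated stand-in for the JSON that pypi.org returns for a release
pypi_release = {'urls': [
    {'filename': 'yt_dlp-2023.1.2-py2.py3-none-any.whl',
     'url': 'https://example.invalid/yt_dlp.whl', 'digests': {'sha256': 'feed...'}},
    {'filename': 'yt-dlp-2023.1.2.tar.gz',
     'url': 'https://example.invalid/yt-dlp.tar.gz', 'digests': {'sha256': 'abc123...'}},
]}

# Same selection expression as in the script: the first .tar.gz entry wins
tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.tar.gz'))
sha256sum = tarball_file['digests']['sha256']
url = tarball_file['url']
print(sha256sum, url)
```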
**devscripts/update-version.py**

```diff
@@ -7,32 +7,35 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


+import contextlib
 import subprocess
 import sys
 from datetime import datetime

-with open('yt_dlp/version.py') as f:
-    exec(compile(f.read(), 'yt_dlp/version.py', 'exec'))
-old_version = locals()['__version__']
+from devscripts.utils import read_version, write_file

-old_version_list = old_version.split('.')

-old_ver = '.'.join(old_version_list[:3])
-old_rev = old_version_list[3] if len(old_version_list) > 3 else ''
+def get_new_version(revision):
+    version = datetime.utcnow().strftime('%Y.%m.%d')

-ver = datetime.utcnow().strftime("%Y.%m.%d")
+    if revision:
+        assert revision.isdigit(), 'Revision must be a number'
+    else:
+        old_version = read_version().split('.')
+        if version.split('.') == old_version[:3]:
+            revision = str(int((old_version + [0])[3]) + 1)

-rev = (sys.argv[1:] or [''])[0]  # Use first argument, if present as revision number
-if not rev:
-    rev = str(int(old_rev or 0) + 1) if old_ver == ver else ''
+    return f'{version}.{revision}' if revision else version

-VERSION = '.'.join((ver, rev)) if rev else ver

-try:
-    sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE)
-    GIT_HEAD = sp.communicate()[0].decode().strip() or None
-except Exception:
-    GIT_HEAD = None
+def get_git_head():
+    with contextlib.suppress(Exception):
+        sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE)
+        return sp.communicate()[0].decode().strip() or None
+
+
+VERSION = get_new_version((sys.argv + [''])[1])
+GIT_HEAD = get_git_head()

 VERSION_FILE = f'''\
 # Autogenerated by devscripts/update-version.py
@@ -40,10 +43,14 @@ VERSION_FILE = f'''\
 __version__ = {VERSION!r}

 RELEASE_GIT_HEAD = {GIT_HEAD!r}

+VARIANT = None
+
+UPDATE_HINT = None
 '''

-with open('yt_dlp/version.py', 'wt') as f:
-    f.write(VERSION_FILE)
+write_file('yt_dlp/version.py', VERSION_FILE)

-print('::set-output name=ytdlp_version::' + VERSION)
+github_output = os.getenv('GITHUB_OUTPUT')
+if github_output:
+    write_file(github_output, f'ytdlp_version={VERSION}\n', 'a')
 print(f'\nVersion = {VERSION}, Git HEAD = {GIT_HEAD}')
```
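The revision logic in `get_new_version` is easy to trace by hand: with no explicit revision, a same-day rebuild bumps a fourth version component. A worked example with hypothetical values:

```python
version = '2023.01.02'                    # what datetime.utcnow().strftime('%Y.%m.%d') produced
old_version = '2023.01.02.1'.split('.')   # pretend read_version() saw a same-day build

revision = ''
if version.split('.') == old_version[:3]:             # same calendar day as the last build
    revision = str(int((old_version + [0])[3]) + 1)   # 1 -> 2; the [0] pads builds without a revision

print(f'{version}.{revision}' if revision else version)  # 2023.01.02.2
```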
**devscripts/utils.py** (new file, 35 lines)

```python
import argparse
import functools


def read_file(fname):
    with open(fname, encoding='utf-8') as f:
        return f.read()


def write_file(fname, content, mode='w'):
    with open(fname, mode, encoding='utf-8') as f:
        return f.write(content)


# Get the version without importing the package
def read_version(fname='yt_dlp/version.py'):
    exec(compile(read_file(fname), fname, 'exec'))
    return locals()['__version__']


def get_filename_args(has_infile=False, default_outfile=None):
    parser = argparse.ArgumentParser()
    if has_infile:
        parser.add_argument('infile', help='Input file')
    kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
    parser.add_argument('outfile', **kwargs, help='Output file')

    opts = parser.parse_args()
    if has_infile:
        return opts.infile, opts.outfile
    return opts.outfile


def compose_functions(*functions):
    return lambda x: functools.reduce(lambda y, f: f(y), functions, x)
```
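Note that `compose_functions` is a left-to-right fold rather than mathematical right-to-left composition, which is worth double-checking when chaining transforms:

```python
import functools


def compose_functions(*functions):
    return lambda x: functools.reduce(lambda y, f: f(y), functions, x)


f = compose_functions(lambda s: s + 'a', lambda s: s + 'b')
print(f(''))  # 'ab' -- the first function listed is applied first
```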
**pyinst.py** (27 lines changed)

```diff
@@ -1,20 +1,24 @@
 #!/usr/bin/env python3

+# Allow direct execution
 import os
-import platform
 import sys

+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+import platform
+
 from PyInstaller.__main__ import run as run_pyinstaller

-OS_NAME, MACHINE, ARCH = sys.platform, platform.machine(), platform.architecture()[0][:2]
-if MACHINE in ('x86_64', 'AMD64') or ('i' in MACHINE and '86' in MACHINE):
-    # NB: Windows x86 has MACHINE = AMD64 irrespective of bitness
+from devscripts.utils import read_version
+
+OS_NAME, MACHINE, ARCH = sys.platform, platform.machine().lower(), platform.architecture()[0][:2]
+if MACHINE in ('x86', 'x86_64', 'amd64', 'i386', 'i686'):
     MACHINE = 'x86' if ARCH == '32' else ''


 def main():
-    opts = parse_options()
-    version = read_version('yt_dlp/version.py')
+    opts, version = parse_options(), read_version()

     onedir = '--onedir' in opts or '-D' in opts
     if not onedir and '-F' not in opts and '--onefile' not in opts:
@@ -53,19 +57,12 @@ def parse_options():
     return opts


-# Get the version from yt_dlp/version.py without importing the package
-def read_version(fname):
-    with open(fname, encoding='utf-8') as f:
-        exec(compile(f.read(), fname, 'exec'))
-        return locals()['__version__']
-
-
 def exe(onedir):
     """@returns (name, path)"""
     name = '_'.join(filter(None, (
         'yt-dlp',
         {'win32': '', 'darwin': 'macos'}.get(OS_NAME, OS_NAME),
-        MACHINE
+        MACHINE,
     )))
     return name, ''.join(filter(None, (
         'dist/',
@@ -83,7 +80,7 @@ def version_to_list(version):
 def dependency_options():
     # Due to the current implementation, these are auto-detected, but explicitly add them just in case
     dependencies = [pycryptodome_module(), 'mutagen', 'brotli', 'certifi', 'websockets']
-    excluded_modules = ['test', 'ytdlp_plugins', 'youtube_dl', 'youtube_dlc']
+    excluded_modules = ('youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts')

     yield from (f'--hidden-import={module}' for module in dependencies)
     yield '--collect-submodules=websockets'
```
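Downstream, `exe()` joins the normalized pieces into the artifact name, and `filter(None, ...)` is what makes the empty `MACHINE` for 64-bit x86 disappear (so the trailing comma added after `MACHINE` is purely stylistic). A sketch with hand-picked inputs:

```python
OS_NAME, MACHINE = 'darwin', ''   # e.g. a 64-bit x86 macOS build after normalization
name = '_'.join(filter(None, (
    'yt-dlp',
    {'win32': '', 'darwin': 'macos'}.get(OS_NAME, OS_NAME),
    MACHINE,                      # '' is dropped by filter(None, ...)
)))
print(name)  # yt-dlp_macos
```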
**setup.cfg** (10 lines changed)

```diff
@@ -10,6 +10,14 @@ per_file_ignores =
     devscripts/lazy_load_template.py: F401


+[autoflake]
+ignore-init-module-imports = true
+ignore-pass-after-docstring = true
+remove-all-unused-imports = true
+remove-duplicate-keys = true
+remove-unused-variables = true
+
+
 [tool:pytest]
 addopts = -ra -v --strict-markers
 markers =
@@ -31,7 +39,7 @@ setenv =


 [isort]
-py_version = 36
+py_version = 37
 multi_line_output = VERTICAL_HANGING_INDENT
 line_length = 80
 reverse_relative = true
```
**setup.py** (149 lines changed)

```diff
@@ -12,71 +12,58 @@ except ImportError:
     from distutils.core import Command, setup
     setuptools_available = False

+from devscripts.utils import read_file, read_version

-def read(fname):
-    with open(fname, encoding='utf-8') as f:
-        return f.read()
-
-
-# Get the version from yt_dlp/version.py without importing the package
-def read_version(fname):
-    exec(compile(read(fname), fname, 'exec'))
-    return locals()['__version__']
-
-
-VERSION = read_version('yt_dlp/version.py')
+VERSION = read_version()

 DESCRIPTION = 'A youtube-dl fork with additional features and patches'

 LONG_DESCRIPTION = '\n\n'.join((
     'Official repository: <https://github.com/yt-dlp/yt-dlp>',
     '**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
-    read('README.md')))
+    read_file('README.md')))

-REQUIREMENTS = read('requirements.txt').splitlines()
+REQUIREMENTS = read_file('requirements.txt').splitlines()


 def packages():
     if setuptools_available:
-        return find_packages(exclude=('youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins'))
+        return find_packages(exclude=('youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts'))

     return [
         'yt_dlp', 'yt_dlp.extractor', 'yt_dlp.downloader', 'yt_dlp.postprocessor', 'yt_dlp.compat',
-        'yt_dlp.extractor.anvato_token_generator',
     ]


 def py2exe_params():
-    import py2exe  # noqa: F401
-
     warnings.warn(
         'py2exe builds do not support pycryptodomex and needs VC++14 to run. '
-        'The recommended way is to use "pyinst.py" to build using pyinstaller')
+        'It is recommended to run "pyinst.py" to build using pyinstaller instead')

     return {
         'console': [{
             'script': './yt_dlp/__main__.py',
             'dest_base': 'yt-dlp',
+            'icon_resources': [(1, 'devscripts/logo.ico')],
+        }],
+        'version_info': {
             'version': VERSION,
             'description': DESCRIPTION,
             'comments': LONG_DESCRIPTION.split('\n')[0],
             'product_name': 'yt-dlp',
             'product_version': VERSION,
-            'icon_resources': [(1, 'devscripts/logo.ico')],
-        }],
-        'options': {
-            'py2exe': {
-                'bundle_files': 0,
-                'compressed': 1,
-                'optimize': 2,
-                'dist_dir': './dist',
-                'excludes': ['Crypto', 'Cryptodome'],  # py2exe cannot import Crypto
-                'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
-                # Modules that are only imported dynamically must be added here
-                'includes': ['yt_dlp.compat._legacy'],
-            }
         },
-        'zipfile': None
+        'options': {
+            'bundle_files': 0,
+            'compressed': 1,
+            'optimize': 2,
+            'dist_dir': './dist',
+            'excludes': ['Crypto', 'Cryptodome'],  # py2exe cannot import Crypto
+            'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
+            # Modules that are only imported dynamically must be added here
+            'includes': ['yt_dlp.compat._legacy'],
+        },
+        'zipfile': None,
     }


@@ -121,45 +108,61 @@ class build_lazy_extractors(Command):
         if self.dry_run:
             print('Skipping build of lazy extractors in dry run mode')
             return
-        subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'])
+        subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])


-params = py2exe_params() if sys.argv[1:2] == ['py2exe'] else build_params()
-setup(
-    name='yt-dlp',
-    version=VERSION,
-    maintainer='pukkandan',
-    maintainer_email='pukkandan.ytdlp@gmail.com',
-    description=DESCRIPTION,
-    long_description=LONG_DESCRIPTION,
-    long_description_content_type='text/markdown',
-    url='https://github.com/yt-dlp/yt-dlp',
-    packages=packages(),
-    install_requires=REQUIREMENTS,
-    python_requires='>=3.6',
-    project_urls={
-        'Documentation': 'https://github.com/yt-dlp/yt-dlp#readme',
-        'Source': 'https://github.com/yt-dlp/yt-dlp',
-        'Tracker': 'https://github.com/yt-dlp/yt-dlp/issues',
-        'Funding': 'https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators',
-    },
-    classifiers=[
-        'Topic :: Multimedia :: Video',
-        'Development Status :: 5 - Production/Stable',
-        'Environment :: Console',
-        'Programming Language :: Python',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Programming Language :: Python :: 3.9',
-        'Programming Language :: Python :: 3.10',
-        'Programming Language :: Python :: 3.11',
-        'Programming Language :: Python :: Implementation',
-        'Programming Language :: Python :: Implementation :: CPython',
-        'Programming Language :: Python :: Implementation :: PyPy',
-        'License :: Public Domain',
-        'Operating System :: OS Independent',
-    ],
-    cmdclass={'build_lazy_extractors': build_lazy_extractors},
-    **params
-)
+def main():
+    if sys.argv[1:2] == ['py2exe']:
+        params = py2exe_params()
+        try:
+            from py2exe import freeze
+        except ImportError:
+            import py2exe  # noqa: F401
+            warnings.warn('You are using an outdated version of py2exe. Support for this version will be removed in the future')
+            params['console'][0].update(params.pop('version_info'))
+            params['options'] = {'py2exe': params.pop('options')}
+        else:
+            return freeze(**params)
+    else:
+        params = build_params()
+
+    setup(
+        name='yt-dlp',
+        version=VERSION,
+        maintainer='pukkandan',
+        maintainer_email='pukkandan.ytdlp@gmail.com',
+        description=DESCRIPTION,
+        long_description=LONG_DESCRIPTION,
+        long_description_content_type='text/markdown',
+        url='https://github.com/yt-dlp/yt-dlp',
+        packages=packages(),
+        install_requires=REQUIREMENTS,
+        python_requires='>=3.7',
+        project_urls={
+            'Documentation': 'https://github.com/yt-dlp/yt-dlp#readme',
+            'Source': 'https://github.com/yt-dlp/yt-dlp',
+            'Tracker': 'https://github.com/yt-dlp/yt-dlp/issues',
+            'Funding': 'https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators',
+        },
+        classifiers=[
+            'Topic :: Multimedia :: Video',
+            'Development Status :: 5 - Production/Stable',
+            'Environment :: Console',
+            'Programming Language :: Python',
+            'Programming Language :: Python :: 3.7',
+            'Programming Language :: Python :: 3.8',
+            'Programming Language :: Python :: 3.9',
+            'Programming Language :: Python :: 3.10',
+            'Programming Language :: Python :: 3.11',
+            'Programming Language :: Python :: Implementation',
+            'Programming Language :: Python :: Implementation :: CPython',
+            'Programming Language :: Python :: Implementation :: PyPy',
+            'License :: Public Domain',
+            'Operating System :: OS Independent',
+        ],
+        cmdclass={'build_lazy_extractors': build_lazy_extractors},
+        **params
+    )
+
+
+main()
```
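The interesting part of the new `main()` is the fallback for old py2exe: the params dict is laid out for the modern `freeze()` API (which the diff imports), and only reshaped into the legacy distutils layout when that import fails. The reshaping in isolation, with a trimmed-down dict:

```python
params = {
    'console': [{'script': './yt_dlp/__main__.py', 'dest_base': 'yt-dlp'}],
    'version_info': {'version': '2023.01.02', 'product_name': 'yt-dlp'},
    'options': {'bundle_files': 0, 'optimize': 2},
}

# Legacy py2exe keeps the version metadata inside the console entry...
params['console'][0].update(params.pop('version_info'))
# ...and expects the build options nested under a 'py2exe' key
params['options'] = {'py2exe': params.pop('options')}

print(params['console'][0]['product_name'])  # yt-dlp
print(params['options'])                     # {'py2exe': {'bundle_files': 0, 'optimize': 2}}
```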
@@ -3,11 +3,12 @@
|
|||||||
- **0000studio:clip**
|
- **0000studio:clip**
|
||||||
- **17live**
|
- **17live**
|
||||||
- **17live:clip**
|
- **17live:clip**
|
||||||
|
- **1News**: 1news.co.nz article videos
|
||||||
- **1tv**: Первый канал
|
- **1tv**: Первый канал
|
||||||
- **20.detik.com**
|
|
||||||
- **20min**
|
- **20min**
|
||||||
- **23video**
|
- **23video**
|
||||||
- **247sports**
|
- **247sports**
|
||||||
|
- **24tv.ua**
|
||||||
- **24video**
|
- **24video**
|
||||||
- **3qsdn**: 3Q SDN
|
- **3qsdn**: 3Q SDN
|
||||||
- **3sat**
|
- **3sat**
|
||||||
@@ -18,11 +19,11 @@
|
|||||||
- **8tracks**
|
- **8tracks**
|
||||||
- **91porn**
|
- **91porn**
|
||||||
- **9c9media**
|
- **9c9media**
|
||||||
- **9gag**
|
- **9gag**: 9GAG
|
||||||
- **9now.com.au**
|
- **9now.com.au**
|
||||||
- **abc.net.au**
|
- **abc.net.au**
|
||||||
- **abc.net.au:iview**
|
- **abc.net.au:iview**
|
||||||
- **abc.net.au:iview:showseries**
|
- **abc.net.au:iview:showseries**
|
||||||
- **abcnews**
|
- **abcnews**
|
||||||
- **abcnews:video**
|
- **abcnews:video**
|
||||||
- **abcotvs**: ABC Owned Television Stations
|
- **abcotvs**: ABC Owned Television Stations
|
||||||
@@ -34,7 +35,7 @@
|
|||||||
- **acast:channel**
|
- **acast:channel**
|
||||||
- **AcFunBangumi**
|
- **AcFunBangumi**
|
||||||
- **AcFunVideo**
|
- **AcFunVideo**
|
||||||
- **ADN**: [<abbr title="netrc machine"><em>animedigitalnetwork</em></abbr>] Anime Digital Network
|
- **ADN**: [<abbr title="netrc machine"><em>animationdigitalnetwork</em></abbr>] Animation Digital Network
|
||||||
- **AdobeConnect**
|
- **AdobeConnect**
|
||||||
- **adobetv**
|
- **adobetv**
|
||||||
- **adobetv:channel**
|
- **adobetv:channel**
|
||||||
@@ -45,10 +46,12 @@
|
|||||||
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault
|
- **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault
|
||||||
- **aenetworks:collection**
|
- **aenetworks:collection**
|
||||||
- **aenetworks:show**
|
- **aenetworks:show**
|
||||||
|
- **AeonCo**
|
||||||
- **afreecatv**: [<abbr title="netrc machine"><em>afreecatv</em></abbr>] afreecatv.com
|
- **afreecatv**: [<abbr title="netrc machine"><em>afreecatv</em></abbr>] afreecatv.com
|
||||||
- **afreecatv:live**: [<abbr title="netrc machine"><em>afreecatv</em></abbr>] afreecatv.com
|
- **afreecatv:live**: [<abbr title="netrc machine"><em>afreecatv</em></abbr>] afreecatv.com
|
||||||
- **afreecatv:user**
|
- **afreecatv:user**
|
||||||
- **AirMozilla**
|
- **AirMozilla**
|
||||||
|
- **AirTV**
|
||||||
- **AliExpressLive**
|
- **AliExpressLive**
|
||||||
- **AlJazeera**
|
- **AlJazeera**
|
||||||
- **Allocine**
|
- **Allocine**
|
||||||
@@ -58,14 +61,18 @@
|
|||||||
- **Alura**: [<abbr title="netrc machine"><em>alura</em></abbr>]
|
- **Alura**: [<abbr title="netrc machine"><em>alura</em></abbr>]
|
||||||
- **AluraCourse**: [<abbr title="netrc machine"><em>aluracourse</em></abbr>]
|
- **AluraCourse**: [<abbr title="netrc machine"><em>aluracourse</em></abbr>]
|
||||||
- **Amara**
|
- **Amara**
|
||||||
|
- **AmazonMiniTV**
|
||||||
|
- **amazonminitv:season**: Amazon MiniTV Series, "minitv:season:" prefix
|
||||||
|
- **amazonminitv:series**
|
||||||
|
- **AmazonReviews**
|
||||||
- **AmazonStore**
|
- **AmazonStore**
|
||||||
- **AMCNetworks**
|
- **AMCNetworks**
|
||||||
- **AmericasTestKitchen**
|
- **AmericasTestKitchen**
|
||||||
- **AmericasTestKitchenSeason**
|
- **AmericasTestKitchenSeason**
|
||||||
- **AmHistoryChannel**
|
- **AmHistoryChannel**
|
||||||
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
|
- **Angel**
|
||||||
- **AnimalPlanet**
|
- **AnimalPlanet**
|
||||||
- **AnimeOnDemand**: [<abbr title="netrc machine"><em>animeondemand</em></abbr>]
|
|
||||||
- **ant1newsgr:article**: ant1news.gr articles
|
- **ant1newsgr:article**: ant1news.gr articles
|
||||||
- **ant1newsgr:embed**: ant1news.gr embedded videos
|
- **ant1newsgr:embed**: ant1news.gr embedded videos
|
||||||
- **ant1newsgr:watch**: ant1news.gr videos
|
- **ant1newsgr:watch**: ant1news.gr videos
|
||||||
@@ -118,20 +125,24 @@
|
|||||||
- **Bandcamp:album**
|
- **Bandcamp:album**
|
||||||
- **Bandcamp:user**
|
- **Bandcamp:user**
|
||||||
- **Bandcamp:weekly**
|
- **Bandcamp:weekly**
|
||||||
- **bangumi.bilibili.com**: BiliBili番剧
|
|
||||||
- **BannedVideo**
|
- **BannedVideo**
|
||||||
- **bbc**: [<abbr title="netrc machine"><em>bbc</em></abbr>] BBC
|
- **bbc**: [<abbr title="netrc machine"><em>bbc</em></abbr>] BBC
|
||||||
- **bbc.co.uk**: [<abbr title="netrc machine"><em>bbc</em></abbr>] BBC iPlayer
|
- **bbc.co.uk**: [<abbr title="netrc machine"><em>bbc</em></abbr>] BBC iPlayer
|
||||||
- **bbc.co.uk:article**: BBC articles
|
- **bbc.co.uk:article**: BBC articles
|
||||||
- **bbc.co.uk:iplayer:episodes**
|
- **bbc.co.uk:iplayer:episodes**
|
||||||
- **bbc.co.uk:iplayer:group**
|
- **bbc.co.uk:iplayer:group**
|
||||||
- **bbc.co.uk:playlist**
|
- **bbc.co.uk:playlist**
|
||||||
- **BBVTV**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
- **BBVTV**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
||||||
|
- **BBVTVLive**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
||||||
|
- **BBVTVRecordings**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
||||||
|
- **BeatBumpPlaylist**
|
||||||
|
- **BeatBumpVideo**
|
||||||
- **Beatport**
|
- **Beatport**
|
||||||
- **Beeg**
|
- **Beeg**
|
||||||
- **BehindKink**
|
- **BehindKink**
|
||||||
- **Bellator**
|
- **Bellator**
|
||||||
- **BellMedia**
|
- **BellMedia**
|
||||||
|
- **BerufeTV**
|
||||||
- **Bet**
|
- **Bet**
|
||||||
- **bfi:player**
|
- **bfi:player**
|
||||||
- **bfmtv**
|
- **bfmtv**
|
||||||
@@ -145,11 +156,15 @@
|
|||||||
- **Bilibili category extractor**
|
- **Bilibili category extractor**
|
||||||
- **BilibiliAudio**
|
- **BilibiliAudio**
|
||||||
- **BilibiliAudioAlbum**
|
- **BilibiliAudioAlbum**
|
||||||
- **BilibiliChannel**
|
- **BiliBiliBangumi**
|
||||||
|
- **BiliBiliBangumiMedia**
|
||||||
- **BiliBiliPlayer**
|
- **BiliBiliPlayer**
|
||||||
- **BiliBiliSearch**: Bilibili video search; "bilisearch:" prefix
|
- **BiliBiliSearch**: Bilibili video search; "bilisearch:" prefix
|
||||||
|
- **BilibiliSpaceAudio**
|
||||||
|
- **BilibiliSpacePlaylist**
|
||||||
|
- **BilibiliSpaceVideo**
|
||||||
- **BiliIntl**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
- **BiliIntl**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
||||||
- **BiliIntlSeries**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
- **biliIntl:series**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
||||||
- **BiliLive**
|
- **BiliLive**
|
||||||
- **BioBioChileTV**
|
- **BioBioChileTV**
|
||||||
- **Biography**
|
- **Biography**
|
||||||
@@ -165,6 +180,7 @@
|
|||||||
- **Bloomberg**
|
- **Bloomberg**
|
||||||
- **BokeCC**
|
- **BokeCC**
|
||||||
- **BongaCams**
|
- **BongaCams**
|
||||||
|
- **BooyahClips**
|
||||||
- **BostonGlobe**
|
- **BostonGlobe**
|
||||||
- **Box**
|
- **Box**
|
||||||
- **Bpb**: Bundeszentrale für politische Bildung
|
- **Bpb**: Bundeszentrale für politische Bildung
|
||||||
@@ -177,6 +193,7 @@
|
|||||||
- **BRMediathek**: Bayerischer Rundfunk Mediathek
|
- **BRMediathek**: Bayerischer Rundfunk Mediathek
|
||||||
- **bt:article**: Bergens Tidende Articles
|
- **bt:article**: Bergens Tidende Articles
|
||||||
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
||||||
|
- **Bundesliga**
|
||||||
- **BusinessInsider**
|
- **BusinessInsider**
|
||||||
- **BuzzFeed**
|
- **BuzzFeed**
|
||||||
- **BYUtv**
|
- **BYUtv**
|
||||||
@@ -187,6 +204,8 @@
|
|||||||
- **Camdemy**
|
- **Camdemy**
|
||||||
- **CamdemyFolder**
|
- **CamdemyFolder**
|
||||||
- **CamModels**
|
- **CamModels**
|
||||||
|
- **Camsoda**
|
||||||
|
- **CamtasiaEmbed**
|
||||||
- **CamWithHer**
|
- **CamWithHer**
|
||||||
- **CanalAlpha**
|
- **CanalAlpha**
|
||||||
- **canalc2.tv**
|
- **canalc2.tv**
|
||||||
@@ -209,7 +228,7 @@
|
|||||||
- **cbssports:embed**
|
- **cbssports:embed**
|
||||||
- **CCMA**
|
- **CCMA**
|
||||||
- **CCTV**: 央视网
|
- **CCTV**: 央视网
|
||||||
- **CDA**
|
- **CDA**: [<abbr title="netrc machine"><em>cdapl</em></abbr>]
|
||||||
- **Cellebrite**
|
- **Cellebrite**
|
||||||
- **CeskaTelevize**
|
- **CeskaTelevize**
|
||||||
- **CGTN**
|
- **CGTN**
|
||||||
@@ -224,6 +243,7 @@
|
|||||||
- **cielotv.it**
|
- **cielotv.it**
|
||||||
- **Cinchcast**
|
- **Cinchcast**
|
||||||
- **Cinemax**
|
- **Cinemax**
|
||||||
|
- **CinetecaMilano**
|
||||||
- **CiscoLiveSearch**
|
- **CiscoLiveSearch**
|
||||||
- **CiscoLiveSession**
|
- **CiscoLiveSession**
|
||||||
- **ciscowebex**: Cisco Webex
|
- **ciscowebex**: Cisco Webex
|
||||||
@@ -232,6 +252,7 @@
|
|||||||
- **Clippit**
|
- **Clippit**
|
||||||
- **ClipRs**
|
- **ClipRs**
|
||||||
- **Clipsyndicate**
|
- **Clipsyndicate**
|
||||||
|
- **ClipYouEmbed**
|
||||||
- **CloserToTruth**
|
- **CloserToTruth**
|
||||||
- **CloudflareStream**
|
- **CloudflareStream**
|
||||||
- **Cloudy**
|
- **Cloudy**
|
||||||
@@ -243,6 +264,7 @@
|
|||||||
- **CNN**
|
- **CNN**
|
||||||
- **CNNArticle**
|
- **CNNArticle**
|
||||||
- **CNNBlogs**
|
- **CNNBlogs**
|
||||||
|
- **CNNIndonesia**
|
||||||
- **ComedyCentral**
|
- **ComedyCentral**
|
||||||
- **ComedyCentralTV**
|
- **ComedyCentralTV**
|
||||||
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
|
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
|
||||||
@@ -261,9 +283,7 @@
|
|||||||
- **CrowdBunker**
|
- **CrowdBunker**
|
||||||
- **CrowdBunkerChannel**
|
- **CrowdBunkerChannel**
|
||||||
- **crunchyroll**: [<abbr title="netrc machine"><em>crunchyroll</em></abbr>]
|
- **crunchyroll**: [<abbr title="netrc machine"><em>crunchyroll</em></abbr>]
|
||||||
- **crunchyroll:beta**: [<abbr title="netrc machine"><em>crunchyroll</em></abbr>]
|
|
||||||
- **crunchyroll:playlist**: [<abbr title="netrc machine"><em>crunchyroll</em></abbr>]
|
- **crunchyroll:playlist**: [<abbr title="netrc machine"><em>crunchyroll</em></abbr>]
|
||||||
- **crunchyroll:playlist:beta**: [<abbr title="netrc machine"><em>crunchyroll</em></abbr>]
|
|
||||||
- **CSpan**: C-SPAN
|
- **CSpan**: C-SPAN
|
||||||
- **CSpanCongress**
|
- **CSpanCongress**
|
||||||
- **CtsNews**: 華視新聞
|
- **CtsNews**: 華視新聞
|
||||||
@@ -299,6 +319,9 @@
|
|||||||
- **defense.gouv.fr**
|
- **defense.gouv.fr**
|
||||||
- **democracynow**
|
- **democracynow**
|
||||||
- **DestinationAmerica**
|
- **DestinationAmerica**
|
||||||
|
- **DetikEmbed**
|
||||||
|
- **DeuxM**
|
||||||
|
- **DeuxMNews**
|
||||||
- **DHM**: Filmarchiv - Deutsches Historisches Museum
|
- **DHM**: Filmarchiv - Deutsches Historisches Museum
|
||||||
- **Digg**
|
- **Digg**
|
||||||
- **DigitalConcertHall**: [<abbr title="netrc machine"><em>digitalconcerthall</em></abbr>] DigitalConcertHall extractor
|
- **DigitalConcertHall**: [<abbr title="netrc machine"><em>digitalconcerthall</em></abbr>] DigitalConcertHall extractor
|
||||||
@@ -316,7 +339,6 @@
|
|||||||
- **DIYNetwork**
|
- **DIYNetwork**
|
||||||
- **dlive:stream**
|
- **dlive:stream**
|
||||||
- **dlive:vod**
|
- **dlive:vod**
|
||||||
- **DoodStream**
|
|
||||||
- **Dotsub**
|
- **Dotsub**
|
||||||
- **Douyin**
|
- **Douyin**
|
||||||
- **DouyuShow**
|
- **DouyuShow**
|
||||||
@@ -345,6 +367,8 @@
|
|||||||
- **ehftv**
|
- **ehftv**
|
||||||
- **eHow**
|
- **eHow**
|
||||||
- **EinsUndEinsTV**: [<abbr title="netrc machine"><em>1und1tv</em></abbr>]
|
- **EinsUndEinsTV**: [<abbr title="netrc machine"><em>1und1tv</em></abbr>]
|
||||||
|
- **EinsUndEinsTVLive**: [<abbr title="netrc machine"><em>1und1tv</em></abbr>]
|
||||||
|
- **EinsUndEinsTVRecordings**: [<abbr title="netrc machine"><em>1und1tv</em></abbr>]
|
||||||
- **Einthusan**
|
- **Einthusan**
|
||||||
- **eitb.tv**
|
- **eitb.tv**
|
||||||
- **EllenTube**
|
- **EllenTube**
|
||||||
@@ -357,6 +381,7 @@
|
|||||||
- **Engadget**
|
- **Engadget**
|
||||||
- **Epicon**
|
- **Epicon**
|
||||||
- **EpiconSeries**
|
- **EpiconSeries**
|
||||||
|
- **Epoch**
|
||||||
- **Eporner**
|
- **Eporner**
|
||||||
- **EroProfile**: [<abbr title="netrc machine"><em>eroprofile</em></abbr>]
|
- **EroProfile**: [<abbr title="netrc machine"><em>eroprofile</em></abbr>]
|
||||||
- **EroProfile:album**
|
- **EroProfile:album**
|
||||||
@@ -369,14 +394,19 @@
|
|||||||
- **ESPNCricInfo**
|
- **ESPNCricInfo**
|
||||||
- **EsriVideo**
|
- **EsriVideo**
|
||||||
- **Europa**
|
- **Europa**
|
||||||
|
- **EuroParlWebstream**
|
||||||
- **EuropeanTour**
|
- **EuropeanTour**
|
||||||
|
- **Eurosport**
|
||||||
- **EUScreen**
|
- **EUScreen**
|
||||||
- **EWETV**: [<abbr title="netrc machine"><em>ewetv</em></abbr>]
|
- **EWETV**: [<abbr title="netrc machine"><em>ewetv</em></abbr>]
|
||||||
|
- **EWETVLive**: [<abbr title="netrc machine"><em>ewetv</em></abbr>]
|
||||||
|
- **EWETVRecordings**: [<abbr title="netrc machine"><em>ewetv</em></abbr>]
|
||||||
- **ExpoTV**
|
- **ExpoTV**
|
||||||
- **Expressen**
|
- **Expressen**
|
||||||
- **ExtremeTube**
|
- **ExtremeTube**
|
||||||
- **EyedoTV**
|
- **EyedoTV**
|
||||||
- **facebook**: [<abbr title="netrc machine"><em>facebook</em></abbr>]
|
- **facebook**: [<abbr title="netrc machine"><em>facebook</em></abbr>]
|
||||||
|
- **facebook:reel**
|
||||||
- **FacebookPluginsVideo**
|
- **FacebookPluginsVideo**
|
||||||
- **fancode:live**: [<abbr title="netrc machine"><em>fancode</em></abbr>]
|
- **fancode:live**: [<abbr title="netrc machine"><em>fancode</em></abbr>]
|
||||||
- **fancode:vod**: [<abbr title="netrc machine"><em>fancode</em></abbr>]
|
- **fancode:vod**: [<abbr title="netrc machine"><em>fancode</em></abbr>]
|
||||||
@@ -403,6 +433,7 @@
|
|||||||
- **Foxgay**
|
- **Foxgay**
|
||||||
- **foxnews**: Fox News and Fox Business Video
|
- **foxnews**: Fox News and Fox Business Video
|
||||||
- **foxnews:article**
|
- **foxnews:article**
|
||||||
|
- **FoxNewsVideo**
|
||||||
- **FoxSports**
|
- **FoxSports**
|
||||||
- **fptplay**: fptplay.vn
|
- **fptplay**: fptplay.vn
|
||||||
- **FranceCulture**
|
- **FranceCulture**
|
||||||
@@ -444,12 +475,16 @@
|
|||||||
- **gem.cbc.ca**: [<abbr title="netrc machine"><em>cbcgem</em></abbr>]
|
- **gem.cbc.ca**: [<abbr title="netrc machine"><em>cbcgem</em></abbr>]
|
||||||
- **gem.cbc.ca:live**
|
- **gem.cbc.ca:live**
|
||||||
- **gem.cbc.ca:playlist**
|
- **gem.cbc.ca:playlist**
|
||||||
|
- **Genius**
|
||||||
|
- **GeniusLyrics**
|
||||||
- **Gettr**
|
- **Gettr**
|
||||||
- **GettrStreaming**
|
- **GettrStreaming**
|
||||||
- **Gfycat**
|
- **Gfycat**
|
||||||
- **GiantBomb**
|
- **GiantBomb**
|
||||||
- **Giga**
|
- **Giga**
|
||||||
- **GlattvisionTV**: [<abbr title="netrc machine"><em>glattvisiontv</em></abbr>]
|
- **GlattvisionTV**: [<abbr title="netrc machine"><em>glattvisiontv</em></abbr>]
|
||||||
|
- **GlattvisionTVLive**: [<abbr title="netrc machine"><em>glattvisiontv</em></abbr>]
|
||||||
|
- **GlattvisionTVRecordings**: [<abbr title="netrc machine"><em>glattvisiontv</em></abbr>]
|
||||||
- **Glide**: Glide mobile video messages (glide.me)
|
- **Glide**: Glide mobile video messages (glide.me)
|
||||||
- **Globo**: [<abbr title="netrc machine"><em>globo</em></abbr>]
|
- **Globo**: [<abbr title="netrc machine"><em>globo</em></abbr>]
|
||||||
- **GloboArticle**
|
- **GloboArticle**
|
||||||
@@ -462,9 +497,10 @@
|
|||||||
- **Golem**
|
- **Golem**
|
||||||
- **goodgame:stream**
|
- **goodgame:stream**
|
||||||
- **google:podcasts**
|
- **google:podcasts**
|
||||||
- **google:podcasts:feed**
|
- **google:podcasts:feed**
|
||||||
- **GoogleDrive**
|
- **GoogleDrive**
|
||||||
- **GoogleDrive:Folder**
|
- **GoogleDrive:Folder**
|
||||||
|
- **GoPlay**: [<abbr title="netrc machine"><em>goplay</em></abbr>]
|
||||||
- **GoPro**
|
- **GoPro**
|
||||||
- **Goshgay**
|
- **Goshgay**
|
||||||
- **GoToStage**
|
- **GoToStage**
|
||||||
@@ -473,6 +509,7 @@
|
|||||||
- **gronkh:feed**
|
- **gronkh:feed**
|
||||||
- **gronkh:vods**
|
- **gronkh:vods**
|
||||||
- **Groupon**
|
- **Groupon**
|
||||||
|
- **Harpodeon**
|
||||||
- **hbo**
|
- **hbo**
|
||||||
- **HearThisAt**
|
- **HearThisAt**
|
||||||
- **Heise**
|
- **Heise**
|
||||||
@@ -491,9 +528,11 @@
|
|||||||
- **hitbox:live**
|
- **hitbox:live**
|
||||||
- **HitRecord**
|
- **HitRecord**
|
||||||
- **hketv**: 香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau
|
- **hketv**: 香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau
|
||||||
|
- **Holodex**
|
||||||
- **HotNewHipHop**
|
- **HotNewHipHop**
|
||||||
- **hotstar**
|
- **hotstar**
|
||||||
- **hotstar:playlist**
|
- **hotstar:playlist**
|
||||||
|
- **hotstar:season**
|
||||||
- **hotstar:series**
|
- **hotstar:series**
|
||||||
- **Howcast**
|
- **Howcast**
|
||||||
- **HowStuffWorks**
|
- **HowStuffWorks**
|
||||||
@@ -502,6 +541,7 @@
|
|||||||
- **HRTiPlaylist**: [<abbr title="netrc machine"><em>hrti</em></abbr>]
|
- **HRTiPlaylist**: [<abbr title="netrc machine"><em>hrti</em></abbr>]
|
||||||
- **HSEProduct**
|
- **HSEProduct**
|
||||||
- **HSEShow**
|
- **HSEShow**
|
||||||
|
- **html5**
|
||||||
- **Huajiao**: 花椒直播
|
- **Huajiao**: 花椒直播
|
||||||
- **HuffPost**: Huffington Post
|
- **HuffPost**: Huffington Post
|
||||||
- **Hungama**
|
- **Hungama**
|
||||||
@@ -511,11 +551,14 @@
|
|||||||
- **Hypem**
|
- **Hypem**
|
||||||
- **Hytale**
|
- **Hytale**
|
||||||
- **Icareus**
|
- **Icareus**
|
||||||
|
- **iflix:episode**
|
||||||
|
- **IflixSeries**
|
||||||
- **ign.com**
|
- **ign.com**
|
||||||
- **IGNArticle**
|
- **IGNArticle**
|
||||||
- **IGNVideo**
|
- **IGNVideo**
|
||||||
- **IHeartRadio**
|
- **IHeartRadio**
|
||||||
- **iheartradio:podcast**
|
- **iheartradio:podcast**
|
||||||
|
- **Iltalehti**
|
||||||
- **imdb**: Internet Movie Database trailers
|
- **imdb**: Internet Movie Database trailers
|
||||||
- **imdb:list**: Internet Movie Database lists
|
- **imdb:list**: Internet Movie Database lists
|
||||||
- **Imgur**
|
- **Imgur**
|
||||||
@@ -538,6 +581,9 @@
|
|||||||
- **iq.com**: International version of iQiyi
|
- **iq.com**: International version of iQiyi
|
||||||
- **iq.com:album**
|
- **iq.com:album**
|
||||||
- **iqiyi**: [<abbr title="netrc machine"><em>iqiyi</em></abbr>] 爱奇艺
|
- **iqiyi**: [<abbr title="netrc machine"><em>iqiyi</em></abbr>] 爱奇艺
|
||||||
|
- **IslamChannel**
|
||||||
|
- **IslamChannelSeries**
|
||||||
|
- **IsraelNationalNews**
|
||||||
- **ITProTV**
|
- **ITProTV**
|
||||||
- **ITProTVCourse**
|
- **ITProTVCourse**
|
||||||
- **ITTF**
|
- **ITTF**
|
||||||
@@ -561,6 +607,8 @@
|
|||||||
- **JWPlatform**
|
- **JWPlatform**
|
||||||
- **Kakao**
|
- **Kakao**
|
||||||
- **Kaltura**
|
- **Kaltura**
|
||||||
|
- **Kanal2**
|
||||||
|
- **KankaNews**
|
||||||
- **Karaoketv**
|
- **Karaoketv**
|
||||||
- **KarriereVideos**
|
- **KarriereVideos**
|
||||||
- **Katsomo**
|
- **Katsomo**
|
||||||
@@ -569,10 +617,13 @@
|
|||||||
- **Ketnet**
|
- **Ketnet**
|
||||||
- **khanacademy**
|
- **khanacademy**
|
||||||
- **khanacademy:unit**
|
- **khanacademy:unit**
|
||||||
|
- **Kick**
|
||||||
- **Kicker**
|
- **Kicker**
|
||||||
- **KickStarter**
|
- **KickStarter**
|
||||||
|
- **KickVOD**
|
||||||
- **KinjaEmbed**
|
- **KinjaEmbed**
|
||||||
- **KinoPoisk**
|
- **KinoPoisk**
|
||||||
|
- **KompasVideo**
|
||||||
- **KonserthusetPlay**
|
- **KonserthusetPlay**
|
||||||
- **Koo**
|
- **Koo**
|
||||||
- **KrasView**: Красвью
|
- **KrasView**: Красвью
|
||||||
@@ -586,7 +637,7 @@
|
|||||||
- **kuwo:singer**: 酷我音乐 - 歌手
|
- **kuwo:singer**: 酷我音乐 - 歌手
|
||||||
- **kuwo:song**: 酷我音乐
|
- **kuwo:song**: 酷我音乐
|
||||||
- **la7.it**
|
- **la7.it**
|
||||||
- **la7.it:pod:episode**
|
- **la7.it:pod:episode**
|
||||||
- **la7.it:podcast**
|
- **la7.it:podcast**
|
||||||
- **laola1tv**
|
- **laola1tv**
|
||||||
- **laola1tv:embed**
|
- **laola1tv:embed**
|
||||||
@@ -620,9 +671,10 @@
|
|||||||
- **LineLiveChannel**
|
- **LineLiveChannel**
|
||||||
- **LinkedIn**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
|
- **LinkedIn**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
|
||||||
- **linkedin:learning**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
|
- **linkedin:learning**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
|
||||||
- **linkedin:learning:course**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
|
- **linkedin:learning:course**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
|
||||||
- **LinuxAcademy**: [<abbr title="netrc machine"><em>linuxacademy</em></abbr>]
|
- **LinuxAcademy**: [<abbr title="netrc machine"><em>linuxacademy</em></abbr>]
|
||||||
- **Liputan6**
|
- **Liputan6**
|
||||||
|
- **ListenNotes**
|
||||||
- **LiTV**
|
- **LiTV**
|
||||||
- **LiveJournal**
|
- **LiveJournal**
|
||||||
- **livestream**
|
- **livestream**
|
||||||
@@ -641,7 +693,7 @@
|
|||||||
- **MagentaMusik360**
|
- **MagentaMusik360**
|
||||||
- **mailru**: Видео@Mail.Ru
|
- **mailru**: Видео@Mail.Ru
|
||||||
- **mailru:music**: Музыка@Mail.Ru
|
- **mailru:music**: Музыка@Mail.Ru
|
||||||
- **mailru:music:search**: Музыка@Mail.Ru
|
- **mailru:music:search**: Музыка@Mail.Ru
|
||||||
- **MainStreaming**: MainStreaming Player
|
- **MainStreaming**: MainStreaming Player
|
||||||
- **MallTV**
|
- **MallTV**
|
||||||
- **mangomolo:live**
|
- **mangomolo:live**
|
||||||
@@ -669,6 +721,8 @@
|
|||||||
- **Mediasite**
|
- **Mediasite**
|
||||||
- **MediasiteCatalog**
|
- **MediasiteCatalog**
|
||||||
- **MediasiteNamedCatalog**
|
- **MediasiteNamedCatalog**
|
||||||
|
- **MediaStream**
|
||||||
|
- **MediaWorksNZVOD**
|
||||||
- **Medici**
|
- **Medici**
|
||||||
- **megaphone.fm**: megaphone.fm embedded players
|
- **megaphone.fm**: megaphone.fm embedded players
|
||||||
- **megatvcom**: megatv.com videos
|
- **megatvcom**: megatv.com videos
|
||||||
@@ -681,10 +735,11 @@
|
|||||||
- **mewatch**
|
- **mewatch**
|
||||||
- **Mgoon**
|
- **Mgoon**
|
||||||
- **MiaoPai**
|
- **MiaoPai**
|
||||||
|
- **MicrosoftEmbed**
|
||||||
- **microsoftstream**: Microsoft Stream
|
- **microsoftstream**: Microsoft Stream
|
||||||
- **mildom**: Record ongoing live by specific user in Mildom
|
- **mildom**: Record ongoing live by specific user in Mildom
|
||||||
- **mildom:clip**: Clip in Mildom
|
- **mildom:clip**: Clip in Mildom
|
||||||
- **mildom:user:vod**: Download all VODs from specific user in Mildom
|
- **mildom:user:vod**: Download all VODs from specific user in Mildom
|
||||||
- **mildom:vod**: VOD in Mildom
|
- **mildom:vod**: VOD in Mildom
|
||||||
- **minds**
|
- **minds**
|
||||||
- **minds:channel**
|
- **minds:channel**
|
||||||
@@ -702,10 +757,14 @@
|
|||||||
- **mixcloud:playlist**
|
- **mixcloud:playlist**
|
||||||
- **mixcloud:user**
|
- **mixcloud:user**
|
||||||
- **MLB**
|
- **MLB**
|
||||||
|
- **MLBArticle**
|
||||||
|
- **MLBTV**: [<abbr title="netrc machine"><em>mlb</em></abbr>]
|
||||||
- **MLBVideo**
|
- **MLBVideo**
|
||||||
- **MLSSoccer**
|
- **MLSSoccer**
|
||||||
- **Mnet**
|
- **Mnet**
|
||||||
- **MNetTV**: [<abbr title="netrc machine"><em>mnettv</em></abbr>]
|
- **MNetTV**: [<abbr title="netrc machine"><em>mnettv</em></abbr>]
|
||||||
|
- **MNetTVLive**: [<abbr title="netrc machine"><em>mnettv</em></abbr>]
|
||||||
|
- **MNetTVRecordings**: [<abbr title="netrc machine"><em>mnettv</em></abbr>]
|
||||||
- **MochaVideo**
|
- **MochaVideo**
|
||||||
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
||||||
- **Mofosex**
|
- **Mofosex**
|
||||||
@@ -715,9 +774,12 @@
|
|||||||
- **Motherless**
|
- **Motherless**
|
||||||
- **MotherlessGroup**
|
- **MotherlessGroup**
|
||||||
- **Motorsport**: motorsport.com
|
- **Motorsport**: motorsport.com
|
||||||
|
- **MotorTrend**
|
||||||
|
- **MotorTrendOnDemand**
|
||||||
- **MovieClips**
|
- **MovieClips**
|
||||||
- **MovieFap**
|
- **MovieFap**
|
||||||
- **Moviepilot**
|
- **Moviepilot**
|
||||||
|
- **MoviewPlay**
|
||||||
- **Moviezine**
|
- **Moviezine**
|
||||||
- **MovingImage**
|
- **MovingImage**
|
||||||
- **MSN**
|
- **MSN**
|
||||||
@@ -764,7 +826,7 @@
|
|||||||
- **navernow**
|
- **navernow**
|
||||||
- **NBA**
|
- **NBA**
|
||||||
- **nba:watch**
|
- **nba:watch**
|
||||||
- **nba:watch:collection**
|
- **nba:watch:collection**
|
||||||
- **NBAChannel**
|
- **NBAChannel**
|
||||||
- **NBAEmbed**
|
- **NBAEmbed**
|
||||||
- **NBAWatchEmbed**
|
- **NBAWatchEmbed**
|
||||||
@@ -775,9 +837,10 @@
|
|||||||
- **NBCSports**
|
- **NBCSports**
|
||||||
- **NBCSportsStream**
|
- **NBCSportsStream**
|
||||||
- **NBCSportsVPlayer**
|
- **NBCSportsVPlayer**
|
||||||
|
- **NBCStations**
|
||||||
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
||||||
- **ndr:embed**
|
- **ndr:embed**
|
||||||
- **ndr:embed:base**
|
- **ndr:embed:base**
|
||||||
- **NDTV**
|
- **NDTV**
|
||||||
- **Nebula**: [<abbr title="netrc machine"><em>watchnebula</em></abbr>]
|
- **Nebula**: [<abbr title="netrc machine"><em>watchnebula</em></abbr>]
|
||||||
- **nebula:channel**: [<abbr title="netrc machine"><em>watchnebula</em></abbr>]
|
- **nebula:channel**: [<abbr title="netrc machine"><em>watchnebula</em></abbr>]
|
||||||
@@ -790,13 +853,17 @@
|
|||||||
- **netease:program**: 网易云音乐 - 电台节目
|
- **netease:program**: 网易云音乐 - 电台节目
|
||||||
- **netease:singer**: 网易云音乐 - 歌手
|
- **netease:singer**: 网易云音乐 - 歌手
|
||||||
- **netease:song**: 网易云音乐
|
- **netease:song**: 网易云音乐
|
||||||
- **NetPlus**: [<abbr title="netrc machine"><em>netplus</em></abbr>]
|
- **NetPlusTV**: [<abbr title="netrc machine"><em>netplus</em></abbr>]
|
||||||
|
- **NetPlusTVLive**: [<abbr title="netrc machine"><em>netplus</em></abbr>]
|
||||||
|
- **NetPlusTVRecordings**: [<abbr title="netrc machine"><em>netplus</em></abbr>]
|
||||||
- **Netverse**
|
- **Netverse**
|
||||||
- **NetversePlaylist**
|
- **NetversePlaylist**
|
||||||
|
- **NetverseSearch**: "netsearch:" prefix
|
||||||
- **Netzkino**
|
- **Netzkino**
|
||||||
- **Newgrounds**
|
- **Newgrounds**
|
||||||
- **Newgrounds:playlist**
|
- **Newgrounds:playlist**
|
||||||
- **Newgrounds:user**
|
- **Newgrounds:user**
|
||||||
|
- **NewsPicks**
|
||||||
- **Newstube**
|
- **Newstube**
|
||||||
- **Newsy**
|
- **Newsy**
|
||||||
- **NextMedia**: 蘋果日報
|
- **NextMedia**: 蘋果日報
|
||||||
@@ -806,8 +873,8 @@
|
|||||||
- **NexxEmbed**
|
- **NexxEmbed**
|
||||||
- **NFB**
|
- **NFB**
|
||||||
- **NFHSNetwork**
|
- **NFHSNetwork**
|
||||||
- **nfl.com**: (**Currently broken**)
|
- **nfl.com**
|
||||||
- **nfl.com:article**: (**Currently broken**)
|
- **nfl.com:article**
|
||||||
- **NhkForSchoolBangumi**
|
- **NhkForSchoolBangumi**
|
||||||
- **NhkForSchoolProgramList**
|
- **NhkForSchoolProgramList**
|
||||||
- **NhkForSchoolSubject**: Portal page for each school subjects, like Japanese (kokugo, 国語) or math (sansuu/suugaku or 算数・数学)
|
- **NhkForSchoolSubject**: Portal page for each school subjects, like Japanese (kokugo, 国語) or math (sansuu/suugaku or 算数・数学)
|
||||||
@@ -826,7 +893,7 @@
|
|||||||
- **niconico:tag**: NicoNico video tag URLs
|
- **niconico:tag**: NicoNico video tag URLs
|
||||||
- **NiconicoUser**
|
- **NiconicoUser**
|
||||||
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
|
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
|
||||||
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
|
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
|
||||||
- **nicovideo:search_url**: Nico video search URLs
|
- **nicovideo:search_url**: Nico video search URLs
|
||||||
- **Nintendo**
|
- **Nintendo**
|
||||||
- **Nitter**
|
- **Nitter**
|
||||||
@@ -834,10 +901,12 @@
|
|||||||
- **njoy:embed**
|
- **njoy:embed**
|
||||||
- **NJPWWorld**: [<abbr title="netrc machine"><em>njpwworld</em></abbr>] 新日本プロレスワールド
|
- **NJPWWorld**: [<abbr title="netrc machine"><em>njpwworld</em></abbr>] 新日本プロレスワールド
|
||||||
- **NobelPrize**
|
- **NobelPrize**
|
||||||
|
- **NoicePodcast**
|
||||||
- **NonkTube**
|
- **NonkTube**
|
||||||
- **NoodleMagazine**
|
- **NoodleMagazine**
|
||||||
- **Noovo**
|
- **Noovo**
|
||||||
- **Normalboots**
|
- **Normalboots**
|
||||||
|
- **NOSNLArticle**
|
||||||
- **NosVideo**
|
- **NosVideo**
|
||||||
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
|
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
|
||||||
- **NovaEmbed**
|
- **NovaEmbed**
|
||||||
@@ -849,7 +918,7 @@
|
|||||||
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
- **npo.nl:live**
|
- **npo.nl:live**
|
||||||
- **npo.nl:radio**
|
- **npo.nl:radio**
|
||||||
- **npo.nl:radio:fragment**
|
- **npo.nl:radio:fragment**
|
||||||
- **Npr**
|
- **Npr**
|
||||||
- **NRK**
|
- **NRK**
|
||||||
- **NRKPlaylist**
|
- **NRKPlaylist**
|
||||||
@@ -872,11 +941,14 @@
|
|||||||
- **ocw.mit.edu**
|
- **ocw.mit.edu**
|
||||||
- **OdaTV**
|
- **OdaTV**
|
||||||
- **Odnoklassniki**
|
- **Odnoklassniki**
|
||||||
|
- **OfTV**
|
||||||
|
- **OfTVPlaylist**
|
||||||
- **OktoberfestTV**
|
- **OktoberfestTV**
|
||||||
- **OlympicsReplay**
|
- **OlympicsReplay**
|
||||||
- **on24**: ON24
|
- **on24**: ON24
|
||||||
- **OnDemandKorea**
|
- **OnDemandKorea**
|
||||||
- **OneFootball**
|
- **OneFootball**
|
||||||
|
- **OnePlacePodcast**
|
||||||
- **onet.pl**
|
- **onet.pl**
|
||||||
- **onet.tv**
|
- **onet.tv**
|
||||||
- **onet.tv:channel**
|
- **onet.tv:channel**
|
||||||
@@ -890,22 +962,13 @@
|
|||||||
- **openrec:capture**
|
- **openrec:capture**
|
||||||
- **openrec:movie**
|
- **openrec:movie**
|
||||||
- **OraTV**
|
- **OraTV**
|
||||||
- **orf:burgenland**: Radio Burgenland
|
- **orf:fm4:story**: fm4.orf.at stories
|
||||||
- **orf:fm4**: radio FM4
|
|
||||||
- **orf:fm4:story**: fm4.orf.at stories
|
|
||||||
- **orf:iptv**: iptv.ORF.at
|
- **orf:iptv**: iptv.ORF.at
|
||||||
- **orf:kaernten**: Radio Kärnten
|
- **orf:radio**
|
||||||
- **orf:noe**: Radio Niederösterreich
|
|
||||||
- **orf:oberoesterreich**: Radio Oberösterreich
|
|
||||||
- **orf:oe1**: Radio Österreich 1
|
|
||||||
- **orf:oe3**: Radio Österreich 3
|
|
||||||
- **orf:salzburg**: Radio Salzburg
|
|
||||||
- **orf:steiermark**: Radio Steiermark
|
|
||||||
- **orf:tirol**: Radio Tirol
|
|
||||||
- **orf:tvthek**: ORF TVthek
|
- **orf:tvthek**: ORF TVthek
|
||||||
- **orf:vorarlberg**: Radio Vorarlberg
|
|
||||||
- **orf:wien**: Radio Wien
|
|
||||||
- **OsnatelTV**: [<abbr title="netrc machine"><em>osnateltv</em></abbr>]
|
- **OsnatelTV**: [<abbr title="netrc machine"><em>osnateltv</em></abbr>]
|
||||||
|
- **OsnatelTVLive**: [<abbr title="netrc machine"><em>osnateltv</em></abbr>]
|
||||||
|
- **OsnatelTVRecordings**: [<abbr title="netrc machine"><em>osnateltv</em></abbr>]
|
||||||
- **OutsideTV**
|
- **OutsideTV**
|
||||||
- **PacktPub**: [<abbr title="netrc machine"><em>packtpub</em></abbr>]
|
- **PacktPub**: [<abbr title="netrc machine"><em>packtpub</em></abbr>]
|
||||||
- **PacktPubCourse**
|
- **PacktPubCourse**
|
||||||
@@ -919,10 +982,11 @@
|
|||||||
- **ParamountNetwork**
|
- **ParamountNetwork**
|
||||||
- **ParamountPlus**
|
- **ParamountPlus**
|
||||||
- **ParamountPlusSeries**
|
- **ParamountPlusSeries**
|
||||||
|
- **Parler**: Posts on parler.com
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
- **Parlview**
|
- **Parlview**
|
||||||
- **Patreon**
|
- **Patreon**
|
||||||
- **PatreonUser**
|
- **PatreonCampaign**
|
||||||
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
 - **PearVideo**
 - **PeekVids**
@@ -946,7 +1010,7 @@
 - **Pinterest**
 - **PinterestCollection**
 - **pixiv:sketch**
 - **pixiv:sketch:user**
 - **Pladform**
 - **PlanetMarathi**
 - **Platzi**: [<abbr title="netrc machine"><em>platzi</em></abbr>]
@@ -964,6 +1028,8 @@
 - **pluralsight**: [<abbr title="netrc machine"><em>pluralsight</em></abbr>]
 - **pluralsight:course**
 - **PlutoTV**
+- **PodbayFM**
+- **PodbayFMChannel**
 - **Podchaser**
 - **podomatic**
 - **Pokemon**
@@ -972,11 +1038,13 @@
 - **PokerGoCollection**: [<abbr title="netrc machine"><em>pokergo</em></abbr>]
 - **PolsatGo**
 - **PolskieRadio**
+- **polskieradio:audition**
+- **polskieradio:category**
 - **polskieradio:kierowcow**
+- **polskieradio:legacy**
 - **polskieradio:player**
 - **polskieradio:podcast**
 - **polskieradio:podcast:list**
-- **PolskieRadioCategory**
 - **Popcorntimes**
 - **PopcornTV**
 - **PornCom**
@@ -993,6 +1061,7 @@
 - **PornoVoisines**
 - **PornoXO**
 - **PornTube**
+- **PrankCast**
 - **PremiershipRugby**
 - **PressTV**
 - **ProjectVeritas**
@@ -1006,12 +1075,15 @@
 - **puhutv:serie**
 - **Puls4**
 - **Pyvideo**
+- **QingTing**
 - **qqmusic**: QQ音乐
 - **qqmusic:album**: QQ音乐 - 专辑
 - **qqmusic:playlist**: QQ音乐 - 歌单
 - **qqmusic:singer**: QQ音乐 - 歌手
 - **qqmusic:toplist**: QQ音乐 - 排行榜
 - **QuantumTV**: [<abbr title="netrc machine"><em>quantumtv</em></abbr>]
+- **QuantumTVLive**: [<abbr title="netrc machine"><em>quantumtv</em></abbr>]
+- **QuantumTVRecordings**: [<abbr title="netrc machine"><em>quantumtv</em></abbr>]
 - **Qub**
 - **R7**
 - **R7Article**
@@ -1030,12 +1102,14 @@
 - **radlive:channel**
 - **radlive:season**
 - **Rai**
+- **RaiNews**
 - **RaiPlay**
 - **RaiPlayLive**
 - **RaiPlayPlaylist**
 - **RaiPlaySound**
 - **RaiPlaySoundLive**
 - **RaiPlaySoundPlaylist**
+- **RaiSudtirol**
 - **RayWenderlich**
 - **RayWenderlichCourse**
 - **RBMARadio**
@@ -1072,7 +1146,7 @@
 - **RoosterTeethSeries**: [<abbr title="netrc machine"><em>roosterteeth</em></abbr>]
 - **RottenTomatoes**
 - **Rozhlas**
-- **RTBF**
+- **RTBF**: [<abbr title="netrc machine"><em>rtbf</em></abbr>]
 - **RTDocumentry**
 - **RTDocumentryPlaylist**
 - **rte**: Raidió Teilifís Éireann TV
@@ -1082,7 +1156,7 @@
 - **rtl.nl**: rtl.nl and rtlxl.nl
 - **rtl2**
 - **rtl2:you**
 - **rtl2:you:series**
 - **RTLLuLive**
 - **RTLLuRadio**
 - **RTNews**
@@ -1099,6 +1173,7 @@
 - **rtvslo.si**
 - **RUHD**
 - **Rule34Video**
+- **Rumble**
 - **RumbleChannel**
 - **RumbleEmbed**
 - **Ruptly**
@@ -1118,15 +1193,23 @@
 - **safari:course**: [<abbr title="netrc machine"><em>safari</em></abbr>] safaribooksonline.com online courses
 - **Saitosan**
 - **SAKTV**: [<abbr title="netrc machine"><em>saktv</em></abbr>]
+- **SAKTVLive**: [<abbr title="netrc machine"><em>saktv</em></abbr>]
+- **SAKTVRecordings**: [<abbr title="netrc machine"><em>saktv</em></abbr>]
 - **SaltTV**: [<abbr title="netrc machine"><em>salttv</em></abbr>]
+- **SaltTVLive**: [<abbr title="netrc machine"><em>salttv</em></abbr>]
+- **SaltTVRecordings**: [<abbr title="netrc machine"><em>salttv</em></abbr>]
 - **SampleFocus**
+- **SamplePlugin**: (**Currently broken**)
+- **Sangiin**: 参議院インターネット審議中継 (archive)
 - **Sapo**: SAPO Vídeos
 - **savefrom.net**
 - **SBS**: sbs.com.au
 - **schooltv**
 - **ScienceChannel**
 - **screen.yahoo:search**: Yahoo screen search; "yvsearch:" prefix
+- **Screen9**
 - **Screencast**
+- **Screencastify**
 - **ScreencastOMatic**
 - **ScrippsNetworks**
 - **scrippsnetworks:watch**
@@ -1144,8 +1227,13 @@
 - **Shahid**: [<abbr title="netrc machine"><em>shahid</em></abbr>]
 - **ShahidShow**
 - **Shared**: shared.sx
+- **ShareVideosEmbed**
 - **ShemarooMe**
 - **ShowRoomLive**
+- **ShugiinItvLive**: 衆議院インターネット審議中継
+- **ShugiinItvLiveRoom**: 衆議院インターネット審議中継 (中継)
+- **ShugiinItvVod**: 衆議院インターネット審議中継 (ビデオライブラリ)
+- **SibnetEmbed**
 - **simplecast**
 - **simplecast:episode**
 - **simplecast:podcast**
@@ -1153,10 +1241,9 @@
 - **Skeb**
 - **sky.it**
 - **sky:news**
 - **sky:news:story**
 - **sky:sports**
 - **sky:sports:news**
-- **skyacademy.it**
 - **SkylineWebcams**
 - **skynewsarabia:article**
 - **skynewsarabia:video**
@@ -1164,6 +1251,7 @@
 - **Slideshare**
 - **SlidesLive**
 - **Slutload**
+- **Smotrim**
 - **Snotr**
 - **Sohu**
 - **SonyLIV**: [<abbr title="netrc machine"><em>sonyliv</em></abbr>]
@@ -1175,6 +1263,7 @@
 - **soundcloud:set**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
 - **soundcloud:trackstation**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
 - **soundcloud:user**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
+- **soundcloud:user:permalink**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
 - **SoundcloudEmbed**
 - **soundgasm**
 - **soundgasm:profile**
@@ -1193,8 +1282,8 @@
 - **Sport5**
 - **SportBox**
 - **SportDeutschland**
-- **spotify**: Spotify episodes
-- **spotify:show**: Spotify shows
+- **spotify**: Spotify episodes (**Currently broken**)
+- **spotify:show**: Spotify shows (**Currently broken**)
 - **Spreaker**
 - **SpreakerPage**
 - **SpreakerShow**
@@ -1231,6 +1320,7 @@
 - **SVTPage**
 - **SVTPlay**: SVT Play and Öppet arkiv
 - **SVTSeries**
+- **SwearnetEpisode**
 - **SWRMediathek**
 - **Syfy**
 - **SYVDK**
@@ -1243,7 +1333,7 @@
 - **Teachable**: [<abbr title="netrc machine"><em>teachable</em></abbr>]
 - **TeachableCourse**: [<abbr title="netrc machine"><em>teachable</em></abbr>]
 - **teachertube**: teachertube.com videos
 - **teachertube:user:collection**: teachertube.com user and collection videos
 - **TeachingChannel**
 - **Teamcoco**
 - **TeamTreeHouse**: [<abbr title="netrc machine"><em>teamtreehouse</em></abbr>]
@@ -1268,6 +1358,7 @@
 - **TeleQuebecVideo**
 - **TeleTask**
 - **Telewebion**
+- **Tempo**
 - **TennisTV**: [<abbr title="netrc machine"><em>tennistv</em></abbr>]
 - **TenPlay**: [<abbr title="netrc machine"><em>10play</em></abbr>]
 - **TF1**
@@ -1287,10 +1378,10 @@
 - **ThreeSpeak**
 - **ThreeSpeakUser**
 - **TikTok**
-- **tiktok:effect**
-- **tiktok:sound**
-- **tiktok:tag**
-- **tiktok:user**
+- **tiktok:effect**: (**Currently broken**)
+- **tiktok:sound**: (**Currently broken**)
+- **tiktok:tag**: (**Currently broken**)
+- **tiktok:user**: (**Currently broken**)
 - **tinypic**: tinypic.com videos
 - **TLC**
 - **TMZ**
@@ -1300,19 +1391,25 @@
 - **toggo**
 - **Tokentube**
 - **Tokentube:channel**
+- **tokfm:audition**
+- **tokfm:podcast**
 - **ToonGoggles**
 - **tou.tv**: [<abbr title="netrc machine"><em>toutv</em></abbr>]
 - **Toypics**: Toypics video
 - **ToypicsUser**: Toypics user profile
 - **TrailerAddict**: (**Currently broken**)
 - **TravelChannel**
+- **Triller**: [<abbr title="netrc machine"><em>triller</em></abbr>]
+- **TrillerUser**: [<abbr title="netrc machine"><em>triller</em></abbr>]
 - **Trilulilu**
 - **Trovo**
 - **TrovoChannelClip**: All Clips of a trovo.live channel; "trovoclip:" prefix
 - **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
 - **TrovoVod**
+- **TrtCocukVideo**
 - **TrueID**
 - **TruNews**
+- **Truth**
 - **TruTV**
 - **Tube8**
 - **TubeTuGraz**: [<abbr title="netrc machine"><em>tubetugraz</em></abbr>] tube.tugraz.at
@@ -1360,8 +1457,9 @@
 - **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
 - **tvp**: Telewizja Polska
 - **tvp:embed**: Telewizja Polska
-- **tvp:series**
 - **tvp:stream**
+- **tvp:vod**
+- **tvp:vod:series**
 - **TVPlayer**
 - **TVPlayHome**
 - **Tweakers**
@@ -1380,6 +1478,7 @@
 - **twitter:broadcast**
 - **twitter:card**
 - **twitter:shortener**
+- **twitter:spaces**
 - **udemy**: [<abbr title="netrc machine"><em>udemy</em></abbr>]
 - **udemy:course**: [<abbr title="netrc machine"><em>udemy</em></abbr>]
 - **UDNEmbed**: 聯合影音
@@ -1390,6 +1489,7 @@
 - **umg:de**: Universal Music Deutschland
 - **Unistra**
 - **Unity**
+- **UnscriptedNewsVideo**
 - **uol.com.br**
 - **uplynk**
 - **uplynk:preplay**
@@ -1407,6 +1507,7 @@
 - **VeeHD**
 - **Veo**
 - **Veoh**
+- **veoh:user**
 - **Vesti**: Вести.Ru
 - **Vevo**
 - **VevoPlaylist**
@@ -1426,6 +1527,11 @@
 - **video.sky.it:live**
 - **VideoDetective**
 - **videofy.me**
+- **VideoKen**
+- **VideoKenCategory**
+- **VideoKenPlayer**
+- **VideoKenPlaylist**
+- **VideoKenTopic**
 - **videomore**
 - **videomore:season**
 - **videomore:video**
@@ -1434,8 +1540,6 @@
 - **VidioLive**: [<abbr title="netrc machine"><em>vidio</em></abbr>]
 - **VidioPremier**: [<abbr title="netrc machine"><em>vidio</em></abbr>]
 - **VidLii**
-- **vier**: [<abbr title="netrc machine"><em>vier</em></abbr>] vier.be and vijf.be
-- **vier:videos**
 - **viewlift**
 - **viewlift:embed**
 - **Viidea**
@@ -1447,6 +1551,7 @@
 - **vimeo:group**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
 - **vimeo:likes**: [<abbr title="netrc machine"><em>vimeo</em></abbr>] Vimeo user likes
 - **vimeo:ondemand**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
+- **vimeo:pro**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
 - **vimeo:review**: [<abbr title="netrc machine"><em>vimeo</em></abbr>] Review pages on vimeo
 - **vimeo:user**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
 - **vimeo:watchlater**: [<abbr title="netrc machine"><em>vimeo</em></abbr>] Vimeo watch later list, ":vimeowatchlater" keyword (requires authentication)
@@ -1480,6 +1585,8 @@
 - **VoxMedia**
 - **VoxMediaVolume**
 - **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
+- **vqq:series**
+- **vqq:video**
 - **Vrak**
 - **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
 - **VrtNU**: [<abbr title="netrc machine"><em>vrtnu</em></abbr>] VrtNU.be
@@ -1488,6 +1595,8 @@
 - **VShare**
 - **VTM**
 - **VTXTV**: [<abbr title="netrc machine"><em>vtxtv</em></abbr>]
+- **VTXTVLive**: [<abbr title="netrc machine"><em>vtxtv</em></abbr>]
+- **VTXTVRecordings**: [<abbr title="netrc machine"><em>vtxtv</em></abbr>]
 - **VuClip**
 - **Vupload**
 - **VVVVID**
@@ -1497,6 +1606,8 @@
 - **Wakanim**
 - **Walla**
 - **WalyTV**: [<abbr title="netrc machine"><em>walytv</em></abbr>]
+- **WalyTVLive**: [<abbr title="netrc machine"><em>walytv</em></abbr>]
+- **WalyTVRecordings**: [<abbr title="netrc machine"><em>walytv</em></abbr>]
 - **wasdtv:clip**
 - **wasdtv:record**
 - **wasdtv:stream**
@@ -1511,6 +1622,7 @@
 - **WDRElefant**
 - **WDRPage**
 - **web.archive:youtube**: web.archive.org saved youtube videos, "ytarchive:" prefix
+- **Webcamerapl**
 - **Webcaster**
 - **WebcasterFeed**
 - **WebOfStories**
@@ -1524,25 +1636,27 @@
 - **wikimedia.org**
 - **Willow**
 - **WimTV**
+- **WinSportsVideo**
 - **Wistia**
+- **WistiaChannel**
 - **WistiaPlaylist**
 - **wnl**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
+- **wordpress:mb.miniAudioPlayer**
+- **wordpress:playlist**
 - **WorldStarHipHop**
 - **wppilot**
 - **wppilot:channels**
 - **WSJ**: Wall Street Journal
 - **WSJArticle**
 - **WWE**
+- **wyborcza:video**
+- **WyborczaPodcast**
 - **XBef**
 - **XboxClips**
 - **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, WolfStream, XVideoSharing
 - **XHamster**
 - **XHamsterEmbed**
 - **XHamsterUser**
-- **xiami:album**: 虾米音乐 - 专辑
-- **xiami:artist**: 虾米音乐 - 歌手
-- **xiami:collection**: 虾米音乐 - 精选集
-- **xiami:song**: 虾米音乐
 - **ximalaya**: 喜马拉雅FM
 - **ximalaya:album**: 喜马拉雅FM 专辑
 - **xinpianchang**: xinpianchang.com
@@ -1556,12 +1670,12 @@
 - **XXXYMovies**
 - **Yahoo**: Yahoo screen and movies
 - **yahoo:gyao**
 - **yahoo:gyao:player**
 - **yahoo:japannews**: Yahoo! Japan News
 - **YandexDisk**
 - **yandexmusic:album**: Яндекс.Музыка - Альбом
 - **yandexmusic:artist:albums**: Яндекс.Музыка - Артист - Альбомы
 - **yandexmusic:artist:tracks**: Яндекс.Музыка - Артист - Треки
 - **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
 - **yandexmusic:track**: Яндекс.Музыка - Трек
 - **YandexVideo**
@@ -1569,6 +1683,7 @@
 - **YapFiles**
 - **YesJapan**
 - **yinyuetai:video**: 音悦Tai
+- **YleAreena**
 - **Ynet**
 - **YouJizz**
 - **youku**: 优酷
@@ -1579,17 +1694,18 @@
 - **YouPorn**
 - **YourPorn**
 - **YourUpload**
-- **youtube**: YouTube
+- **youtube+sample+NSIG+AGB**: YouTube
 - **youtube:clip**
 - **youtube:favorites**: YouTube liked videos; ":ytfav" keyword (requires cookies)
 - **youtube:history**: Youtube watch history; ":ythis" keyword (requires cookies)
-- **youtube:music:search_url**: YouTube music search URLs with selectable sections (Eg: #songs)
+- **youtube:music:search_url**: YouTube music search URLs with selectable sections, e.g. #songs
 - **youtube:notif**: YouTube notifications; ":ytnotif" keyword (requires cookies)
 - **youtube:playlist**: YouTube playlists
 - **youtube:recommended**: YouTube recommended videos; ":ytrec" keyword
 - **youtube:search**: YouTube search; "ytsearch:" prefix
 - **youtube:search:date**: YouTube search, newest videos first; "ytsearchdate:" prefix
 - **youtube:search_url**: YouTube search URLs with sorting and filter support
+- **youtube:shorts:pivot:audio**: YouTube Shorts audio pivot (Shorts using audio of a given video)
 - **youtube:stories**: YouTube channel stories; "ytstories:" prefix
 - **youtube:subscriptions**: YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)
 - **youtube:tab**: YouTube Tabs
@@ -1606,6 +1722,7 @@
 - **ZDFChannel**
 - **Zee5**: [<abbr title="netrc machine"><em>zee5</em></abbr>]
 - **zee5:series**
+- **ZeeNews**
 - **ZenYandex**
 - **ZenYandexChannel**
 - **Zhihu**

@@ -92,6 +92,13 @@ def gettestcases(include_onlymatching=False):
         yield from ie.get_testcases(include_onlymatching)
 
 
+def getwebpagetestcases():
+    for ie in yt_dlp.extractor.gen_extractors():
+        for tc in ie.get_webpage_testcases():
+            tc.setdefault('add_ie', []).append('Generic')
+            yield tc
+
+
 md5 = lambda s: hashlib.md5(s.encode()).hexdigest()
 
 
@@ -215,6 +222,10 @@ def sanitize_got_info_dict(got_dict):
     if test_info_dict.get('display_id') == test_info_dict.get('id'):
         test_info_dict.pop('display_id')
 
+    # Check url for flat entries
+    if got_dict.get('_type', 'video') != 'video' and got_dict.get('url'):
+        test_info_dict['url'] = got_dict['url']
+
     return test_info_dict
 
 
@@ -228,8 +239,9 @@ def expect_info_dict(self, got_dict, expected_dict):
     for key in mandatory_fields:
         self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
     # Check for mandatory fields that are automatically set by YoutubeDL
-    for key in ['webpage_url', 'extractor', 'extractor_key']:
-        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
+    if got_dict.get('_type', 'video') == 'video':
+        for key in ['webpage_url', 'extractor', 'extractor_key']:
+            self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
 
     test_info_dict = sanitize_got_info_dict(got_dict)
 
@@ -242,19 +254,16 @@ def expect_info_dict(self, got_dict, expected_dict):
             return v.__name__
         else:
            return repr(v)
-    info_dict_str = ''
-    if len(missing_keys) != len(expected_dict):
-        info_dict_str += ''.join(
-            f'    {_repr(k)}: {_repr(v)},\n'
-            for k, v in test_info_dict.items() if k not in missing_keys)
-
-        if info_dict_str:
-            info_dict_str += '\n'
+    info_dict_str = ''.join(
+        f'    {_repr(k)}: {_repr(v)},\n'
+        for k, v in test_info_dict.items() if k not in missing_keys)
+    if info_dict_str:
+        info_dict_str += '\n'
     info_dict_str += ''.join(
         f'    {_repr(k)}: {_repr(test_info_dict[k])},\n'
         for k in missing_keys)
-    write_string(
-        '\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
+    info_dict_str = '\n\'info_dict\': {\n' + info_dict_str + '},\n'
+    write_string(info_dict_str.replace('\n', '\n '), out=sys.stderr)
     self.assertFalse(
         missing_keys,
         'Missing keys in test definition: %s' % (

@@ -44,5 +44,6 @@
     "writesubtitles": false,
     "allsubtitles": false,
     "listsubtitles": false,
-    "fixup": "never"
+    "fixup": "never",
+    "allow_playlist_files": false
 }

@@ -41,7 +41,9 @@ class InfoExtractorTestRequestHandler(http.server.BaseHTTPRequestHandler):
 
 
 class DummyIE(InfoExtractor):
-    pass
+    def _sort_formats(self, formats, field_preference=[]):
+        self._downloader.sort_formats(
+            {'formats': formats, '_format_sort_fields': field_preference})
 
 
 class TestInfoExtractor(unittest.TestCase):
@@ -1567,6 +1569,292 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
                     ]
                 },
             ),
+            (
+                'ec-3_test',
+                'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                [{
+                    'format_id': 'audio_deu_1-224',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'isma',
+                    'tbr': 224,
+                    'asr': 48000,
+                    'vcodec': 'none',
+                    'acodec': 'EC-3',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'audio',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 0,
+                        'height': 0,
+                        'fourcc': 'EC-3',
+                        'language': 'deu',
+                        'codec_private_data': '00063F000000AF87FBA7022DFB42A4D405CD93843BDD0700200F00',
+                        'sampling_rate': 48000,
+                        'channels': 6,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'audio_ext': 'isma',
+                    'video_ext': 'none',
+                    'abr': 224,
+                }, {
+                    'format_id': 'audio_deu-127',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'isma',
+                    'tbr': 127,
+                    'asr': 48000,
+                    'vcodec': 'none',
+                    'acodec': 'AACL',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'audio',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 0,
+                        'height': 0,
+                        'fourcc': 'AACL',
+                        'language': 'deu',
+                        'codec_private_data': '1190',
+                        'sampling_rate': 48000,
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'audio_ext': 'isma',
+                    'video_ext': 'none',
+                    'abr': 127,
+                }, {
+                    'format_id': 'video_deu-23',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 384,
+                    'height': 216,
+                    'tbr': 23,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 384,
+                        'height': 216,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '000000016742C00CDB06077E5C05A808080A00000300020000030009C0C02EE0177CC6300F142AE00000000168CA8DC8',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 23,
+                }, {
+                    'format_id': 'video_deu-403',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 400,
+                    'height': 224,
+                    'tbr': 403,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 400,
+                        'height': 224,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D4014E98323B602D4040405000003000100000300320F1429380000000168EAECF2',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 403,
+                }, {
+                    'format_id': 'video_deu-680',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 640,
+                    'height': 360,
+                    'tbr': 680,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 640,
+                        'height': 360,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 680,
+                }, {
+                    'format_id': 'video_deu-1253',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 640,
+                    'height': 360,
+                    'tbr': 1253,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 640,
+                        'height': 360,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 1253,
+                }, {
+                    'format_id': 'video_deu-2121',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 768,
+                    'height': 432,
+                    'tbr': 2121,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 768,
+                        'height': 432,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D401EECA0601BD80B50101014000003000400000300C83C58B6580000000168E93B3C80',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 2121,
+                }, {
+                    'format_id': 'video_deu-3275',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 1280,
+                    'height': 720,
+                    'tbr': 3275,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 1280,
+                        'height': 720,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D4020ECA02802DD80B501010140000003004000000C83C60C65800000000168E93B3C80',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 3275,
+                }, {
+                    'format_id': 'video_deu-5300',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 1920,
+                    'height': 1080,
+                    'tbr': 5300,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 1920,
+                        'height': 1080,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 5300,
+                }, {
+                    'format_id': 'video_deu-8079',
+                    'url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'manifest_url': 'https://smstr01.dmm.t-online.de/smooth24/smoothstream_m1/streaming/sony/9221438342941275747/636887760842957027/25_km_h-Trailer-9221571562372022953_deu_20_1300k_HD_H_264_ISMV.ism/Manifest',
+                    'ext': 'ismv',
+                    'width': 1920,
+                    'height': 1080,
+                    'tbr': 8079,
+                    'vcodec': 'AVC1',
+                    'acodec': 'none',
+                    'protocol': 'ism',
+                    '_download_params':
+                    {
+                        'stream_type': 'video',
+                        'duration': 370000000,
+                        'timescale': 10000000,
+                        'width': 1920,
+                        'height': 1080,
+                        'fourcc': 'AVC1',
+                        'language': 'deu',
+                        'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
+                        'channels': 2,
+                        'bits_per_sample': 16,
+                        'nal_unit_length_field': 4
+                    },
+                    'video_ext': 'ismv',
+                    'audio_ext': 'none',
+                    'vbr': 8079,
+                }],
+                {},
+            ),
         ]
 
         for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:

@@ -68,8 +68,7 @@ class TestFormatSelection(unittest.TestCase):
             {'ext': 'mp4', 'height': 460, 'url': TEST_URL},
         ]
         info_dict = _make_result(formats)
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['ext'], 'webm')
@@ -82,8 +81,7 @@ class TestFormatSelection(unittest.TestCase):
             {'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
         ]
         info_dict['formats'] = formats
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['ext'], 'mp4')
@@ -97,8 +95,7 @@ class TestFormatSelection(unittest.TestCase):
             {'ext': 'flv', 'height': 720, 'url': TEST_URL},
         ]
         info_dict['formats'] = formats
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['ext'], 'mp4')
@@ -110,15 +107,14 @@ class TestFormatSelection(unittest.TestCase):
             {'ext': 'webm', 'height': 720, 'url': TEST_URL},
         ]
         info_dict['formats'] = formats
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['ext'], 'webm')
 
     def test_format_selection(self):
         formats = [
-            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+            {'format_id': '35', 'ext': 'mp4', 'preference': 0, 'url': TEST_URL},
             {'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL},
             {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
             {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
@@ -186,22 +182,19 @@ class TestFormatSelection(unittest.TestCase):
 
         info_dict = _make_result(formats)
         ydl = YDL({'format': 'best'})
-        ie = YoutubeIE(ydl)
-        ie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(copy.deepcopy(info_dict))
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'aac-64')
 
         ydl = YDL({'format': 'mp3'})
-        ie = YoutubeIE(ydl)
-        ie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(copy.deepcopy(info_dict))
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'mp3-64')
 
         ydl = YDL({'prefer_free_formats': True})
-        ie = YoutubeIE(ydl)
-        ie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(copy.deepcopy(info_dict))
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'ogg-64')
@@ -346,8 +339,7 @@ class TestFormatSelection(unittest.TestCase):
 
         info_dict = _make_result(list(formats_order), extractor='youtube')
         ydl = YDL({'format': 'bestvideo+bestaudio'})
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], '248+172')
@@ -355,40 +347,35 @@ class TestFormatSelection(unittest.TestCase):
 
         info_dict = _make_result(list(formats_order), extractor='youtube')
         ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'})
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], '38')
 
         info_dict = _make_result(list(formats_order), extractor='youtube')
         ydl = YDL({'format': 'bestvideo/best,bestaudio'})
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
         self.assertEqual(downloaded_ids, ['137', '141'])
 
         info_dict = _make_result(list(formats_order), extractor='youtube')
         ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'})
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
         self.assertEqual(downloaded_ids, ['137+141', '248+141'])
 
         info_dict = _make_result(list(formats_order), extractor='youtube')
         ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'})
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
         self.assertEqual(downloaded_ids, ['136+141', '247+141'])
 
         info_dict = _make_result(list(formats_order), extractor='youtube')
         ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'})
-        yie = YoutubeIE(ydl)
-        yie._sort_formats(info_dict['formats'])
+        ydl.sort_formats(info_dict)
         ydl.process_ie_result(info_dict)
         downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
         self.assertEqual(downloaded_ids, ['248+141'])
@@ -396,16 +383,14 @@ class TestFormatSelection(unittest.TestCase):
         for f1, f2 in zip(formats_order, formats_order[1:]):
             info_dict = _make_result([f1, f2], extractor='youtube')
             ydl = YDL({'format': 'best/bestvideo'})
-            yie = YoutubeIE(ydl)
-            yie._sort_formats(info_dict['formats'])
+            ydl.sort_formats(info_dict)
             ydl.process_ie_result(info_dict)
             downloaded = ydl.downloaded_info_dicts[0]
             self.assertEqual(downloaded['format_id'], f1['format_id'])
 
             info_dict = _make_result([f2, f1], extractor='youtube')
             ydl = YDL({'format': 'best/bestvideo'})
-            yie = YoutubeIE(ydl)
-            yie._sort_formats(info_dict['formats'])
+            ydl.sort_formats(info_dict)
             ydl.process_ie_result(info_dict)
             downloaded = ydl.downloaded_info_dicts[0]
             self.assertEqual(downloaded['format_id'], f1['format_id'])
@@ -480,7 +465,7 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
for f in formats:
|
for f in formats:
|
||||||
f['url'] = 'http://_/'
|
f['url'] = 'http://_/'
|
||||||
f['ext'] = 'unknown'
|
f['ext'] = 'unknown'
|
||||||
info_dict = _make_result(formats)
|
info_dict = _make_result(formats, _format_sort_fields=('id', ))
|
||||||
|
|
||||||
ydl = YDL({'format': 'best[filesize<3000]'})
|
ydl = YDL({'format': 'best[filesize<3000]'})
|
||||||
ydl.process_ie_result(info_dict)
|
ydl.process_ie_result(info_dict)
|
||||||
@@ -662,13 +647,17 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
'playlist_autonumber': 2,
|
'playlist_autonumber': 2,
|
||||||
'__last_playlist_index': 100,
|
'__last_playlist_index': 100,
|
||||||
'n_entries': 10,
|
'n_entries': 10,
|
||||||
'formats': [{'id': 'id 1'}, {'id': 'id 2'}, {'id': 'id 3'}]
|
'formats': [
|
||||||
|
{'id': 'id 1', 'height': 1080, 'width': 1920},
|
||||||
|
{'id': 'id 2', 'height': 720},
|
||||||
|
{'id': 'id 3'}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
def test_prepare_outtmpl_and_filename(self):
|
def test_prepare_outtmpl_and_filename(self):
|
||||||
def test(tmpl, expected, *, info=None, **params):
|
def test(tmpl, expected, *, info=None, **params):
|
||||||
params['outtmpl'] = tmpl
|
params['outtmpl'] = tmpl
|
||||||
ydl = YoutubeDL(params)
|
ydl = FakeYDL(params)
|
||||||
ydl._num_downloads = 1
|
ydl._num_downloads = 1
|
||||||
self.assertEqual(ydl.validate_outtmpl(tmpl), None)
|
self.assertEqual(ydl.validate_outtmpl(tmpl), None)
|
||||||
|
|
||||||
@@ -722,13 +711,14 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(id)s', '-abcd', info={'id': '-abcd'})
         test('%(id)s', '.abcd', info={'id': '.abcd'})
         test('%(id)s', 'ab__cd', info={'id': 'ab__cd'})
-        test('%(id)s', ('ab:cd', 'ab -cd'), info={'id': 'ab:cd'})
+        test('%(id)s', ('ab:cd', 'ab：cd'), info={'id': 'ab:cd'})
         test('%(id.0)s', '-', info={'id': '--'})

         # Invalid templates
         self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
         test('%(invalid@tmpl|def)s', 'none', outtmpl_na_placeholder='none')
         test('%(..)s', 'NA')
+        test('%(formats.{id)s', 'NA')

         # Entire info_dict
         def expect_same_infodict(out):
@@ -770,7 +760,7 @@ class TestYoutubeDL(unittest.TestCase):
         test('a%(width|)d', 'a', outtmpl_na_placeholder='none')

         FORMATS = self.outtmpl_info['formats']
-        sanitize = lambda x: x.replace(':', ' -').replace('"', "'").replace('\n', ' ')
+        sanitize = lambda x: x.replace(':', '：').replace('"', '＂').replace('\n', ' ')

         # Custom type casting
         test('%(formats.:.id)l', 'id 1, id 2, id 3')
@@ -788,13 +778,13 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(filesize)#D', '1Ki')
         test('%(height)5.2D', ' 1.08k')
         test('%(title4)#S', 'foo_bar_test')
-        test('%(title4).10S', ('foo \'bar\' ', 'foo \'bar\'' + ('#' if compat_os_name == 'nt' else ' ')))
+        test('%(title4).10S', ('foo ＂bar＂ ', 'foo ＂bar＂' + ('#' if compat_os_name == 'nt' else ' ')))
         if compat_os_name == 'nt':
-            test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
-            test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', "'id 1' 'id 2' 'id 3'"))
-            test('%(formats.0.id)#q', ('"id 1"', "'id 1'"))
+            test('%(title4)q', ('"foo \\"bar\\" test"', "＂foo ⧹＂bar⧹＂ test＂"))
+            test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', '＂id 1＂ ＂id 2＂ ＂id 3＂'))
+            test('%(formats.0.id)#q', ('"id 1"', '＂id 1＂'))
         else:
-            test('%(title4)q', ('\'foo "bar" test\'', "'foo 'bar' test'"))
+            test('%(title4)q', ('\'foo "bar" test\'', '\'foo "bar" test\''))
             test('%(formats.:.id)#q', "'id 1' 'id 2' 'id 3'")
             test('%(formats.0.id)#q', "'id 1'")

@@ -813,6 +803,12 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(formats.:2:-1)r', repr(FORMATS[:2:-1]))
         test('%(formats.0.id.-1+id)f', '1235.000000')
         test('%(formats.0.id.-1+formats.1.id.-1)d', '3')
+        out = json.dumps([{'id': f['id'], 'height.:2': str(f['height'])[:2]}
+                          if 'height' in f else {'id': f['id']}
+                          for f in FORMATS])
+        test('%(formats.:.{id,height.:2})j', (out, sanitize(out)))
+        test('%(formats.:.{id,height}.id)l', ', '.join(f['id'] for f in FORMATS))
+        test('%(.{id,title})j', ('{"id": "1234"}', '{＂id＂: ＂1234＂}'))

         # Alternates
         test('%(title,id)s', '1234')
@@ -852,8 +848,8 @@ class TestYoutubeDL(unittest.TestCase):
         # Path expansion and escaping
         test('Hello %(title1)s', 'Hello $PATH')
         test('Hello %(title2)s', 'Hello %PATH%')
-        test('%(title3)s', ('foo/bar\\test', 'foo_bar_test'))
-        test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoo_bar_test' % os.path.sep))
+        test('%(title3)s', ('foo/bar\\test', 'foo⧸bar⧹test'))
+        test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoo⧸bar⧹test' % os.path.sep))

     def test_format_note(self):
         ydl = YoutubeDL()
test/test_aes.py
@@ -11,7 +11,6 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import base64

 from yt_dlp.aes import (
-    BLOCK_SIZE_BYTES,
     aes_cbc_decrypt,
     aes_cbc_decrypt_bytes,
     aes_cbc_encrypt,
@@ -103,8 +102,7 @@ class TestAES(unittest.TestCase):

     def test_ecb_encrypt(self):
         data = bytes_to_intlist(self.secret_msg)
-        data += [0x08] * (BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES)
-        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key, self.iv))
+        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key))
         self.assertEqual(
             encrypted,
             b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')
test/test_compat.py
@@ -28,7 +28,8 @@ class TestCompat(unittest.TestCase):
         with self.assertWarns(DeprecationWarning):
             compat.WINDOWS_VT_MODE

-        compat.asyncio.events  # Must not raise error
+        # TODO: Test submodule
+        # compat.asyncio.events  # Must not raise error

     def test_compat_expanduser(self):
         old_home = os.environ.get('HOME')
test/test_cookies.py
@@ -3,6 +3,7 @@ from datetime import datetime, timezone

 from yt_dlp import cookies
 from yt_dlp.cookies import (
+    LenientSimpleCookie,
     LinuxChromeCookieDecryptor,
     MacChromeCookieDecryptor,
     WindowsChromeCookieDecryptor,
@@ -137,3 +138,163 @@ class TestCookies(unittest.TestCase):
     def test_pbkdf2_sha1(self):
         key = pbkdf2_sha1(b'peanuts', b' ' * 16, 1, 16)
         self.assertEqual(key, b'g\xe1\x8e\x0fQ\x1c\x9b\xf3\xc9`!\xaa\x90\xd9\xd34')
+
+
+class TestLenientSimpleCookie(unittest.TestCase):
+    def _run_tests(self, *cases):
+        for message, raw_cookie, expected in cases:
+            cookie = LenientSimpleCookie(raw_cookie)
+
+            with self.subTest(message, expected=expected):
+                self.assertEqual(cookie.keys(), expected.keys(), message)
+
+                for key, expected_value in expected.items():
+                    morsel = cookie[key]
+                    if isinstance(expected_value, tuple):
+                        expected_value, expected_attributes = expected_value
+                    else:
+                        expected_attributes = {}
+
+                    attributes = {
+                        key: value
+                        for key, value in dict(morsel).items()
+                        if value != ""
+                    }
+                    self.assertEqual(attributes, expected_attributes, message)
+
+                    self.assertEqual(morsel.value, expected_value, message)
+
+    def test_parsing(self):
+        self._run_tests(
+            # Copied from https://github.com/python/cpython/blob/v3.10.7/Lib/test/test_http_cookies.py
+            (
+                "Test basic cookie",
+                "chips=ahoy; vienna=finger",
+                {"chips": "ahoy", "vienna": "finger"},
+            ),
+            (
+                "Test quoted cookie",
+                'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
+                {"keebler": 'E=mc2; L="Loves"; fudge=\012;'},
+            ),
+            (
+                "Allow '=' in an unquoted value",
+                "keebler=E=mc2",
+                {"keebler": "E=mc2"},
+            ),
+            (
+                "Allow cookies with ':' in their name",
+                "key:term=value:term",
+                {"key:term": "value:term"},
+            ),
+            (
+                "Allow '[' and ']' in cookie values",
+                "a=b; c=[; d=r; f=h",
+                {"a": "b", "c": "[", "d": "r", "f": "h"},
+            ),
+            (
+                "Test basic cookie attributes",
+                'Customer="WILE_E_COYOTE"; Version=1; Path=/acme',
+                {"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})},
+            ),
+            (
+                "Test flag only cookie attributes",
+                'Customer="WILE_E_COYOTE"; HttpOnly; Secure',
+                {"Customer": ("WILE_E_COYOTE", {"httponly": True, "secure": True})},
+            ),
+            (
+                "Test flag only attribute with values",
+                "eggs=scrambled; httponly=foo; secure=bar; Path=/bacon",
+                {"eggs": ("scrambled", {"httponly": "foo", "secure": "bar", "path": "/bacon"})},
+            ),
+            (
+                "Test special case for 'expires' attribute, 4 digit year",
+                'Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT',
+                {"Customer": ("W", {"expires": "Wed, 01 Jan 2010 00:00:00 GMT"})},
+            ),
+            (
+                "Test special case for 'expires' attribute, 2 digit year",
+                'Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT',
+                {"Customer": ("W", {"expires": "Wed, 01 Jan 98 00:00:00 GMT"})},
+            ),
+            (
+                "Test extra spaces in keys and values",
+                "eggs = scrambled ; secure ; path = bar ; foo=foo ",
+                {"eggs": ("scrambled", {"secure": True, "path": "bar"}), "foo": "foo"},
+            ),
+            (
+                "Test quoted attributes",
+                'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"',
+                {"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})}
+            ),
+            # Our own tests that CPython passes
+            (
+                "Allow ';' in quoted value",
+                'chips="a;hoy"; vienna=finger',
+                {"chips": "a;hoy", "vienna": "finger"},
+            ),
+            (
+                "Keep only the last set value",
+                "a=c; a=b",
+                {"a": "b"},
+            ),
+        )
+
+    def test_lenient_parsing(self):
+        self._run_tests(
+            (
+                "Ignore and try to skip invalid cookies",
+                'chips={"ahoy;": 1}; vienna="finger;"',
+                {"vienna": "finger;"},
+            ),
+            (
+                "Ignore cookies without a name",
+                "a=b; unnamed; c=d",
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Ignore '\"' cookie without name",
+                'a=b; "; c=d',
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Skip all space separated values",
+                "x a=b c=d x; e=f",
+                {"a": "b", "c": "d", "e": "f"},
+            ),
+            (
+                "Skip all space separated values",
+                'x a=b; data={"complex": "json", "with": "key=value"}; x c=d x',
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Expect quote mending",
+                'a=b; invalid="; c=d',
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Reset morsel after invalid to not capture attributes",
+                "a=b; invalid; Version=1; c=d",
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Reset morsel after invalid to not capture attributes",
+                "a=b; $invalid; $Version=1; c=d",
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Continue after non-flag attribute without value",
+                "a=b; path; Version=1; c=d",
+                {"a": "b", "c": "d"},
+            ),
+            (
+                "Allow cookie attributes with `$` prefix",
+                'Customer="WILE_E_COYOTE"; $Version=1; $Secure; $Path=/acme',
+                {"Customer": ("WILE_E_COYOTE", {"version": "1", "secure": True, "path": "/acme"})},
+            ),
+            (
+                "Invalid Morsel keys should not result in an error",
+                "Key=Value; [Invalid]=Value; Another=Value",
+                {"Key": "Value", "Another": "Value"},
+            ),
+        )
test/test_download.py
@@ -8,6 +8,7 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

+import collections
 import hashlib
 import http.client
 import json
@@ -20,6 +21,7 @@ from test.helper import (
     expect_warnings,
     get_params,
     gettestcases,
+    getwebpagetestcases,
     is_download_test,
     report_warning,
     try_rm,
@@ -32,6 +34,7 @@ from yt_dlp.utils import (
     ExtractorError,
     UnavailableVideoError,
     format_bytes,
+    join_nonempty,
 )

 RETRIES = 3
@@ -57,7 +60,9 @@ def _file_md5(fn):
         return hashlib.md5(f.read()).hexdigest()


-defs = gettestcases()
+normal_test_cases = gettestcases()
+webpage_test_cases = getwebpagetestcases()
+tests_counter = collections.defaultdict(collections.Counter)


 @is_download_test
@@ -72,24 +77,13 @@ class TestDownload(unittest.TestCase):

     def __str__(self):
         """Identify each test with the `add_ie` attribute, if available."""
-
-        def strclass(cls):
-            """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
-            return f'{cls.__module__}.{cls.__name__}'
-
-        add_ie = getattr(self, self._testMethodName).add_ie
-        return '%s (%s)%s:' % (self._testMethodName,
-                               strclass(self.__class__),
-                               ' [%s]' % add_ie if add_ie else '')
-
-    def setUp(self):
-        self.defs = defs
+        cls, add_ie = type(self), getattr(self, self._testMethodName).add_ie
+        return f'{self._testMethodName} ({cls.__module__}.{cls.__name__}){f" [{add_ie}]" if add_ie else ""}:'

 # Dynamically generate tests


 def generator(test_case, tname):

     def test_template(self):
         if self.COMPLETED_TESTS.get(tname):
             return
@@ -111,11 +105,11 @@ def generator(test_case, tname):
             info_dict = tc.get('info_dict', {})
             params = tc.get('params', {})
             if not info_dict.get('id'):
-                raise Exception('Test definition incorrect. \'id\' key is not present')
-            elif not info_dict.get('ext'):
+                raise Exception(f'Test {tname} definition incorrect - "id" key is not present')
+            elif not info_dict.get('ext') and info_dict.get('_type', 'video') == 'video':
                 if params.get('skip_download') and params.get('ignore_no_formats_error'):
                     continue
-                raise Exception('Test definition incorrect. The output file cannot be known. \'ext\' key is not present')
+                raise Exception(f'Test {tname} definition incorrect - "ext" key must be present to define the output file')

         if 'skip' in test_case:
             print_skipping(test_case['skip'])
@@ -128,7 +122,8 @@ def generator(test_case, tname):
             params['outtmpl'] = tname + '_' + params['outtmpl']
         if is_playlist and 'playlist' not in test_case:
             params.setdefault('extract_flat', 'in_playlist')
-            params.setdefault('playlistend', test_case.get('playlist_mincount'))
+            params.setdefault('playlistend', test_case.get(
+                'playlist_mincount', test_case.get('playlist_count', -2) + 1))
             params.setdefault('skip_download', True)

         ydl = YoutubeDL(params, auto_init=False)
@@ -167,7 +162,9 @@ def generator(test_case, tname):
                         force_generic_extractor=params.get('force_generic_extractor', False))
                 except (DownloadError, ExtractorError) as err:
                     # Check if the exception is not a network related one
-                    if not err.exc_info[0] in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine) or (err.exc_info[0] == urllib.error.HTTPError and err.exc_info[1].code == 503):
+                    if (err.exc_info[0] not in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine)
+                            or (err.exc_info[0] == urllib.error.HTTPError and err.exc_info[1].code == 503)):
+                        err.msg = f'{getattr(err, "msg", err)} ({tname})'
                         raise

                     if try_num == RETRIES:
@@ -216,6 +213,8 @@ def generator(test_case, tname):
                     tc_res_dict = res_dict['entries'][tc_num]
                     # First, check test cases' data against extracted data alone
                     expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
+                    if tc_res_dict.get('_type', 'video') != 'video':
+                        continue
                     # Now, check downloaded file consistency
                     tc_filename = get_tc_filename(tc)
                     if not test_case.get('params', {}).get('skip_download', False):
@@ -255,39 +254,43 @@ def generator(test_case, tname):


 # And add them to TestDownload
-tests_counter = {}
-for test_case in defs:
-    name = test_case['name']
-    i = tests_counter.get(name, 0)
-    tests_counter[name] = i + 1
-    tname = f'test_{name}_{i}' if i else f'test_{name}'
-    test_method = generator(test_case, tname)
-    test_method.__name__ = str(tname)
-    ie_list = test_case.get('add_ie')
-    test_method.add_ie = ie_list and ','.join(ie_list)
-    setattr(TestDownload, test_method.__name__, test_method)
-    del test_method
+def inject_tests(test_cases, label=''):
+    for test_case in test_cases:
+        name = test_case['name']
+        tname = join_nonempty('test', name, label, tests_counter[name][label], delim='_')
+        tests_counter[name][label] += 1
+
+        test_method = generator(test_case, tname)
+        test_method.__name__ = tname
+        test_method.add_ie = ','.join(test_case.get('add_ie', []))
+        setattr(TestDownload, test_method.__name__, test_method)
+
+
+inject_tests(normal_test_cases)
+
+# TODO: disable redirection to the IE to ensure we are actually testing the webpage extraction
+inject_tests(webpage_test_cases, 'webpage')


-def batch_generator(name, num_tests):
+def batch_generator(name):
     def test_template(self):
-        for i in range(num_tests):
-            test_name = f'test_{name}_{i}' if i else f'test_{name}'
-            try:
-                getattr(self, test_name)()
-            except unittest.SkipTest:
-                print(f'Skipped {test_name}')
+        for label, num_tests in tests_counter[name].items():
+            for i in range(num_tests):
+                test_name = join_nonempty('test', name, label, i, delim='_')
+                try:
+                    getattr(self, test_name)()
+                except unittest.SkipTest:
+                    print(f'Skipped {test_name}')

     return test_template


-for name, num_tests in tests_counter.items():
-    test_method = batch_generator(name, num_tests)
+for name in tests_counter:
+    test_method = batch_generator(name)
     test_method.__name__ = f'test_{name}_all'
     test_method.add_ie = ''
     setattr(TestDownload, test_method.__name__, test_method)
     del test_method


 if __name__ == '__main__':
test/test_downloader_http.py
@@ -95,8 +95,8 @@ class TestHttpFD(unittest.TestCase):
         try_rm(encodeFilename(filename))
         self.assertTrue(downloader.real_download(filename, {
             'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
-        }))
-        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
+        }), ep)
+        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
         try_rm(encodeFilename(filename))

     def download_all(self, params):
test/test_execution.py
@@ -11,41 +11,46 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import contextlib
 import subprocess

-from yt_dlp.utils import encodeArgument
+from yt_dlp.utils import Popen

 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+LAZY_EXTRACTORS = 'yt_dlp/extractor/lazy_extractors.py'

-try:
-    _DEV_NULL = subprocess.DEVNULL
-except AttributeError:
-    _DEV_NULL = open(os.devnull, 'wb')
-

 class TestExecution(unittest.TestCase):
-    def test_import(self):
-        subprocess.check_call([sys.executable, '-c', 'import yt_dlp'], cwd=rootDir)
-
-    def test_module_exec(self):
-        subprocess.check_call([sys.executable, '-m', 'yt_dlp', '--ignore-config', '--version'], cwd=rootDir, stdout=_DEV_NULL)
+    def run_yt_dlp(self, exe=(sys.executable, 'yt_dlp/__main__.py'), opts=('--version', )):
+        stdout, stderr, returncode = Popen.run(
+            [*exe, '--ignore-config', *opts], cwd=rootDir, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        print(stderr, file=sys.stderr)
+        self.assertEqual(returncode, 0)
+        return stdout.strip(), stderr.strip()

     def test_main_exec(self):
-        subprocess.check_call([sys.executable, 'yt_dlp/__main__.py', '--ignore-config', '--version'], cwd=rootDir, stdout=_DEV_NULL)
+        self.run_yt_dlp()
+
+    def test_import(self):
+        self.run_yt_dlp(exe=(sys.executable, '-c', 'import yt_dlp'))
+
+    def test_module_exec(self):
+        self.run_yt_dlp(exe=(sys.executable, '-m', 'yt_dlp'))

     def test_cmdline_umlauts(self):
-        p = subprocess.Popen(
-            [sys.executable, 'yt_dlp/__main__.py', '--ignore-config', encodeArgument('ä'), '--version'],
-            cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE)
-        _, stderr = p.communicate()
+        _, stderr = self.run_yt_dlp(opts=('ä', '--version'))
         self.assertFalse(stderr)

     def test_lazy_extractors(self):
         try:
-            subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'], cwd=rootDir, stdout=_DEV_NULL)
-            subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=_DEV_NULL)
+            subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', LAZY_EXTRACTORS],
+                                  cwd=rootDir, stdout=subprocess.DEVNULL)
+            self.assertTrue(os.path.exists(LAZY_EXTRACTORS))
+
+            _, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
+            self.assertFalse(stderr)
+
+            subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=subprocess.DEVNULL)
         finally:
             with contextlib.suppress(OSError):
-                os.remove('yt_dlp/extractor/lazy_extractors.py')
+                os.remove(LAZY_EXTRACTORS)


 if __name__ == '__main__':
test/test_http.py
@@ -85,7 +85,7 @@ class TestHTTPS(unittest.TestCase):

         ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
         r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
-        self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
+        self.assertEqual(r['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)


 class TestClientCert(unittest.TestCase):
@@ -113,7 +113,7 @@ class TestClientCert(unittest.TestCase):
             **params,
         })
         r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
-        self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
+        self.assertEqual(r['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)

     def test_certificate_combined_nopass(self):
         self._run_test(client_certificate=os.path.join(self.certdir, 'clientwithkey.crt'))
test/test_jsinterp.py
@@ -7,8 +7,10 @@ import unittest

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

+import math
+import re

-from yt_dlp.jsinterp import JSInterpreter
+from yt_dlp.jsinterp import JS_Undefined, JSInterpreter


 class TestJSInterpreter(unittest.TestCase):
@@ -19,6 +21,9 @@ class TestJSInterpreter(unittest.TestCase):
         jsi = JSInterpreter('function x3(){return 42;}')
         self.assertEqual(jsi.call_function('x3'), 42)

+        jsi = JSInterpreter('function x3(){42}')
+        self.assertEqual(jsi.call_function('x3'), None)
+
         jsi = JSInterpreter('var x5 = function(){return 42;}')
         self.assertEqual(jsi.call_function('x5'), 42)

@@ -45,14 +50,32 @@ class TestJSInterpreter(unittest.TestCase):
         jsi = JSInterpreter('function f(){return 1 << 5;}')
         self.assertEqual(jsi.call_function('f'), 32)

+        jsi = JSInterpreter('function f(){return 2 ** 5}')
+        self.assertEqual(jsi.call_function('f'), 32)
+
         jsi = JSInterpreter('function f(){return 19 & 21;}')
         self.assertEqual(jsi.call_function('f'), 17)

         jsi = JSInterpreter('function f(){return 11 >> 2;}')
         self.assertEqual(jsi.call_function('f'), 2)

+        jsi = JSInterpreter('function f(){return []? 2+3: 4;}')
+        self.assertEqual(jsi.call_function('f'), 5)
+
+        jsi = JSInterpreter('function f(){return 1 == 2}')
+        self.assertEqual(jsi.call_function('f'), False)
+
+        jsi = JSInterpreter('function f(){return 0 && 1 || 2;}')
+        self.assertEqual(jsi.call_function('f'), 2)
+
+        jsi = JSInterpreter('function f(){return 0 ?? 42;}')
+        self.assertEqual(jsi.call_function('f'), 0)
+
+        jsi = JSInterpreter('function f(){return "life, the universe and everything" < 42;}')
+        self.assertFalse(jsi.call_function('f'))
+
     def test_array_access(self):
-        jsi = JSInterpreter('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2] = 7; return x;}')
+        jsi = JSInterpreter('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2.0] = 7; return x;}')
         self.assertEqual(jsi.call_function('f'), [5, 2, 7])

     def test_parens(self):
@@ -62,6 +85,10 @@ class TestJSInterpreter(unittest.TestCase):
         jsi = JSInterpreter('function f(){return (1 + 2) * 3;}')
         self.assertEqual(jsi.call_function('f'), 9)

+    def test_quotes(self):
+        jsi = JSInterpreter(R'function f(){return "a\"\\("}')
+        self.assertEqual(jsi.call_function('f'), R'a"\(')
+
     def test_assignments(self):
         jsi = JSInterpreter('function f(){var x = 20; x = 30 + 1; return x;}')
         self.assertEqual(jsi.call_function('f'), 31)
@@ -104,17 +131,33 @@ class TestJSInterpreter(unittest.TestCase):
         }''')
         self.assertEqual(jsi.call_function('x'), [20, 20, 30, 40, 50])

+    def test_builtins(self):
+        jsi = JSInterpreter('''
+        function x() { return NaN }
+        ''')
+        self.assertTrue(math.isnan(jsi.call_function('x')))
+
+        jsi = JSInterpreter('''
+        function x() { return new Date('Wednesday 31 December 1969 18:01:26 MDT') - 0; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 86000)
+        jsi = JSInterpreter('''
+        function x(dt) { return new Date(dt) - 0; }
+        ''')
+        self.assertEqual(jsi.call_function('x', 'Wednesday 31 December 1969 18:01:26 MDT'), 86000)
+
     def test_call(self):
         jsi = JSInterpreter('''
         function x() { return 2; }
-        function y(a) { return x() + a; }
+        function y(a) { return x() + (a?a:0); }
         function z() { return y(3); }
         ''')
         self.assertEqual(jsi.call_function('z'), 5)
+        self.assertEqual(jsi.call_function('y'), 2)

     def test_for_loop(self):
         jsi = JSInterpreter('''
-        function x() { a=0; for (i=0; i-10; i++) {a++} a }
+        function x() { a=0; for (i=0; i-10; i++) {a++} return a }
         ''')
         self.assertEqual(jsi.call_function('x'), 10)

@@ -153,21 +196,53 @@ class TestJSInterpreter(unittest.TestCase):
         ''')
         self.assertEqual(jsi.call_function('x'), 10)

+    def test_catch(self):
+        jsi = JSInterpreter('''
+        function x() { try{throw 10} catch(e){return 5} }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 5)
+
+    def test_finally(self):
+        jsi = JSInterpreter('''
+        function x() { try{throw 10} finally {return 42} }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 42)
+        jsi = JSInterpreter('''
+        function x() { try{throw 10} catch(e){return 5} finally {return 42} }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 42)
+
+    def test_nested_try(self):
+        jsi = JSInterpreter('''
+        function x() {try {
+            try{throw 10} finally {throw 42}
+            } catch(e){return 5} }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 5)
+
     def test_for_loop_continue(self):
         jsi = JSInterpreter('''
-        function x() { a=0; for (i=0; i-10; i++) { continue; a++ } a }
+        function x() { a=0; for (i=0; i-10; i++) { continue; a++ } return a }
         ''')
         self.assertEqual(jsi.call_function('x'), 0)

     def test_for_loop_break(self):
         jsi = JSInterpreter('''
-        function x() { a=0; for (i=0; i-10; i++) { break; a++ } a }
+        function x() { a=0; for (i=0; i-10; i++) { break; a++ } return a }
         ''')
         self.assertEqual(jsi.call_function('x'), 0)

+    def test_for_loop_try(self):
+        jsi = JSInterpreter('''
+        function x() {
+            for (i=0; i-10; i++) { try { if (i == 5) throw i} catch {return 10} finally {break} };
+            return 42 }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 42)
+
     def test_literal_list(self):
         jsi = JSInterpreter('''
-        function x() { [1, 2, "asdf", [5, 6, 7]][3] }
+        function x() { return [1, 2, "asdf", [5, 6, 7]][3] }
         ''')
         self.assertEqual(jsi.call_function('x'), [5, 6, 7])

@@ -177,6 +252,167 @@ class TestJSInterpreter(unittest.TestCase):
         ''')
         self.assertEqual(jsi.call_function('x'), 7)

+        jsi = JSInterpreter('''
+        function x() { a=5; return (a -= 1, a+=3, a); }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 7)
+
+        jsi = JSInterpreter('''
+        function x() { return (l=[0,1,2,3], function(a, b){return a+b})((l[1], l[2]), l[3]) }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 5)
+
+    def test_void(self):
+        jsi = JSInterpreter('''
+        function x() { return void 42; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), None)
+
+    def test_return_function(self):
+        jsi = JSInterpreter('''
+        function x() { return [1, function(){return 1}][1] }
+        ''')
+        self.assertEqual(jsi.call_function('x')([]), 1)
+
+    def test_null(self):
+        jsi = JSInterpreter('''
+        function x() { return null; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), None)
+
+        jsi = JSInterpreter('''
+        function x() { return [null > 0, null < 0, null == 0, null === 0]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [False, False, False, False])
+
+        jsi = JSInterpreter('''
+        function x() { return [null >= 0, null <= 0]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [True, True])
+
+    def test_undefined(self):
+        jsi = JSInterpreter('''
+        function x() { return undefined === undefined; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), True)
+
+        jsi = JSInterpreter('''
+        function x() { return undefined; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), JS_Undefined)
+
+        jsi = JSInterpreter('''
+        function x() { let v; return v; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), JS_Undefined)
+
+        jsi = JSInterpreter('''
+        function x() { return [undefined === undefined, undefined == undefined, undefined < undefined, undefined > undefined]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [True, True, False, False])
+
+        jsi = JSInterpreter('''
+        function x() { return [undefined === 0, undefined == 0, undefined < 0, undefined > 0]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [False, False, False, False])
+
+        jsi = JSInterpreter('''
+        function x() { return [undefined >= 0, undefined <= 0]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [False, False])
+
+        jsi = JSInterpreter('''
+        function x() { return [undefined > null, undefined < null, undefined == null, undefined === null]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [False, False, True, False])
+
+        jsi = JSInterpreter('''
+        function x() { return [undefined === null, undefined == null, undefined < null, undefined > null]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [False, True, False, False])
+
+        jsi = JSInterpreter('''
+        function x() { let v; return [42+v, v+42, v**42, 42**v, 0**v]; }
+        ''')
+        for y in jsi.call_function('x'):
+            self.assertTrue(math.isnan(y))
+
+        jsi = JSInterpreter('''
+        function x() { let v; return v**0; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 1)
+
+        jsi = JSInterpreter('''
+        function x() { let v; return [v>42, v<=42, v&&42, 42&&v]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [False, False, JS_Undefined, JS_Undefined])
+
+        jsi = JSInterpreter('function x(){return undefined ?? 42; }')
+        self.assertEqual(jsi.call_function('x'), 42)
+
+    def test_object(self):
+        jsi = JSInterpreter('''
+        function x() { return {}; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), {})
+
+        jsi = JSInterpreter('''
+        function x() { let a = {m1: 42, m2: 0 }; return [a["m1"], a.m2]; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), [42, 0])
+
+        jsi = JSInterpreter('''
+        function x() { let a; return a?.qq; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), JS_Undefined)
+
+        jsi = JSInterpreter('''
+        function x() { let a = {m1: 42, m2: 0 }; return a?.qq; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), JS_Undefined)
+
+    def test_regex(self):
+        jsi = JSInterpreter('''
+        function x() { let a=/,,[/,913,/](,)}/; }
+        ''')
+        self.assertEqual(jsi.call_function('x'), None)
+
+        jsi = JSInterpreter('''
+        function x() { let a=/,,[/,913,/](,)}/; return a; }
+        ''')
+        self.assertIsInstance(jsi.call_function('x'), re.Pattern)
+
+        jsi = JSInterpreter('''
+        function x() { let a=/,,[/,913,/](,)}/i; return a; }
+        ''')
+        self.assertEqual(jsi.call_function('x').flags & re.I, re.I)
+
+        jsi = JSInterpreter(R'''
+        function x() { let a=/,][}",],()}(\[)/; return a; }
+        ''')
+        self.assertEqual(jsi.call_function('x').pattern, r',][}",],()}(\[)')
+
+        jsi = JSInterpreter(R'''
+        function x() { let a=[/[)\\]/]; return a[0]; }
+        ''')
+        self.assertEqual(jsi.call_function('x').pattern, r'[)\\]')
+
+    def test_char_code_at(self):
+        jsi = JSInterpreter('function x(i){return "test".charCodeAt(i)}')
+        self.assertEqual(jsi.call_function('x', 0), 116)
+        self.assertEqual(jsi.call_function('x', 1), 101)
+        self.assertEqual(jsi.call_function('x', 2), 115)
+        self.assertEqual(jsi.call_function('x', 3), 116)
+        self.assertEqual(jsi.call_function('x', 4), None)
+        self.assertEqual(jsi.call_function('x', 'not_a_number'), 116)
+
+    def test_bitwise_operators_overflow(self):
+        jsi = JSInterpreter('function x(){return -524999584 << 5}')
+        self.assertEqual(jsi.call_function('x'), 379882496)
+
+        jsi = JSInterpreter('function x(){return 1236566549 << 5}')
+        self.assertEqual(jsi.call_function('x'), 915423904)
+

 if __name__ == '__main__':
     unittest.main()
73
test/test_plugins.py
Normal file
73
test/test_plugins.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
import importlib
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
TEST_DATA_DIR = Path(os.path.dirname(os.path.abspath(__file__)), 'testdata')
|
||||||
|
sys.path.append(str(TEST_DATA_DIR))
|
||||||
|
importlib.invalidate_caches()
|
||||||
|
|
||||||
|
from yt_dlp.plugins import PACKAGE_NAME, directories, load_plugins
|
||||||
|
|
||||||
|
|
||||||
|
class TestPlugins(unittest.TestCase):
|
||||||
|
|
||||||
|
TEST_PLUGIN_DIR = TEST_DATA_DIR / PACKAGE_NAME
|
||||||
|
|
||||||
|
def test_directories_containing_plugins(self):
|
||||||
|
self.assertIn(self.TEST_PLUGIN_DIR, map(Path, directories()))
|
||||||
|
|
||||||
|
def test_extractor_classes(self):
|
||||||
|
for module_name in tuple(sys.modules):
|
||||||
|
if module_name.startswith(f'{PACKAGE_NAME}.extractor'):
|
||||||
|
del sys.modules[module_name]
|
||||||
|
plugins_ie = load_plugins('extractor', 'IE')
|
||||||
|
|
||||||
|
self.assertIn(f'{PACKAGE_NAME}.extractor.normal', sys.modules.keys())
|
||||||
|
self.assertIn('NormalPluginIE', plugins_ie.keys())
|
||||||
|
|
||||||
|
# don't load modules with underscore prefix
|
||||||
|
self.assertFalse(
|
||||||
|
f'{PACKAGE_NAME}.extractor._ignore' in sys.modules.keys(),
|
||||||
|
'loaded module beginning with underscore')
|
||||||
|
self.assertNotIn('IgnorePluginIE', plugins_ie.keys())
|
||||||
|
|
||||||
|
# Don't load extractors with underscore prefix
|
||||||
|
self.assertNotIn('_IgnoreUnderscorePluginIE', plugins_ie.keys())
|
||||||
|
|
||||||
|
# Don't load extractors not specified in __all__ (if supplied)
|
||||||
|
self.assertNotIn('IgnoreNotInAllPluginIE', plugins_ie.keys())
|
||||||
|
self.assertIn('InAllPluginIE', plugins_ie.keys())
|
||||||
|
|
||||||
|
def test_postprocessor_classes(self):
|
||||||
|
plugins_pp = load_plugins('postprocessor', 'PP')
|
||||||
|
self.assertIn('NormalPluginPP', plugins_pp.keys())
|
||||||
|
|
||||||
|
def test_importing_zipped_module(self):
|
||||||
|
zip_path = TEST_DATA_DIR / 'zipped_plugins.zip'
|
||||||
|
shutil.make_archive(str(zip_path)[:-4], 'zip', str(zip_path)[:-4])
|
||||||
|
sys.path.append(str(zip_path)) # add zip to search paths
|
||||||
|
importlib.invalidate_caches() # reset the import caches
|
||||||
|
|
||||||
|
try:
|
||||||
|
for plugin_type in ('extractor', 'postprocessor'):
|
||||||
|
package = importlib.import_module(f'{PACKAGE_NAME}.{plugin_type}')
|
||||||
|
self.assertIn(zip_path / PACKAGE_NAME / plugin_type, map(Path, package.__path__))
|
||||||
|
|
||||||
|
plugins_ie = load_plugins('extractor', 'IE')
|
||||||
|
self.assertIn('ZippedPluginIE', plugins_ie.keys())
|
||||||
|
|
||||||
|
plugins_pp = load_plugins('postprocessor', 'PP')
|
||||||
|
self.assertIn('ZippedPluginPP', plugins_pp.keys())
|
||||||
|
|
||||||
|
finally:
|
||||||
|
sys.path.remove(str(zip_path))
|
||||||
|
os.remove(zip_path)
|
||||||
|
importlib.invalidate_caches() # reset the import caches
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
test/test_postprocessor.py
@@ -16,6 +16,7 @@ from yt_dlp.postprocessor import (
     MetadataFromFieldPP,
     MetadataParserPP,
     ModifyChaptersPP,
+    SponsorBlockPP,
 )


@@ -76,11 +77,15 @@ class TestModifyChaptersPP(unittest.TestCase):
         self._pp = ModifyChaptersPP(YoutubeDL())

     @staticmethod
-    def _sponsor_chapter(start, end, cat, remove=False):
-        c = {'start_time': start, 'end_time': end, '_categories': [(cat, start, end)]}
-        if remove:
-            c['remove'] = True
-        return c
+    def _sponsor_chapter(start, end, cat, remove=False, title=None):
+        if title is None:
+            title = SponsorBlockPP.CATEGORIES[cat]
+        return {
+            'start_time': start,
+            'end_time': end,
+            '_categories': [(cat, start, end, title)],
+            **({'remove': True} if remove else {}),
+        }

     @staticmethod
     def _chapter(start, end, title=None, remove=False):
@@ -130,6 +135,19 @@ class TestModifyChaptersPP(unittest.TestCase):
                                   'c', '[SponsorBlock]: Filler Tangent', 'c'])
         self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

+    def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
+        chapters = self._chapters([70], ['c']) + [
+            self._sponsor_chapter(10, 20, 'chapter', title='sb c1'),
+            self._sponsor_chapter(15, 16, 'chapter', title='sb c2'),
+            self._sponsor_chapter(30, 40, 'preview'),
+            self._sponsor_chapter(50, 60, 'filler')]
+        expected = self._chapters(
+            [10, 15, 16, 20, 30, 40, 50, 60, 70],
+            ['c', '[SponsorBlock]: sb c1', '[SponsorBlock]: sb c1, sb c2', '[SponsorBlock]: sb c1',
+             'c', '[SponsorBlock]: Preview/Recap',
+             'c', '[SponsorBlock]: Filler Tangent', 'c'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
     def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
         chapters = self._chapters([120], ['c']) + [
             self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
@@ -173,7 +191,7 @@ class TestModifyChaptersPP(unittest.TestCase):
         self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

     def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
-        cuts = [self._sponsor_chapter(20, 50, 'selpromo', remove=True)]
+        cuts = [self._sponsor_chapter(20, 50, 'selfpromo', remove=True)]
         chapters = self._chapters([60], ['c']) + [
             self._sponsor_chapter(10, 20, 'intro'),
             self._sponsor_chapter(30, 40, 'sponsor'),
@@ -199,7 +217,7 @@ class TestModifyChaptersPP(unittest.TestCase):
             self._sponsor_chapter(10, 20, 'sponsor'),
             self._sponsor_chapter(20, 30, 'interaction', remove=True),
             self._chapter(30, 40, remove=True),
-            self._sponsor_chapter(40, 50, 'selpromo', remove=True),
+            self._sponsor_chapter(40, 50, 'selfpromo', remove=True),
             self._sponsor_chapter(50, 60, 'interaction')]
         expected = self._chapters([10, 20, 30, 40],
                                   ['c', '[SponsorBlock]: Sponsor',
@@ -282,7 +300,7 @@ class TestModifyChaptersPP(unittest.TestCase):
         chapters = self._chapters([70], ['c']) + [
             self._sponsor_chapter(10, 30, 'sponsor'),
             self._sponsor_chapter(20, 50, 'interaction'),
-            self._sponsor_chapter(30, 50, 'selpromo', remove=True),
+            self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
             self._sponsor_chapter(40, 60, 'sponsor'),
             self._sponsor_chapter(50, 60, 'interaction')]
         expected = self._chapters(
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
# Allow direct execution
|
# Allow direct execution
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import sys
|
import sys
|
||||||
import unittest
|
import unittest
|
||||||
|
|
||||||
@@ -53,6 +54,7 @@ from yt_dlp.utils import (
|
|||||||
fix_xml_ampersands,
|
fix_xml_ampersands,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
format_bytes,
|
format_bytes,
|
||||||
|
get_compatible_ext,
|
||||||
get_element_by_attribute,
|
get_element_by_attribute,
|
||||||
get_element_by_class,
|
get_element_by_class,
|
||||||
get_element_html_by_attribute,
|
get_element_html_by_attribute,
|
||||||
@@ -108,6 +110,7 @@ from yt_dlp.utils import (
|
|||||||
strip_or_none,
|
strip_or_none,
|
||||||
subtitles_filename,
|
subtitles_filename,
|
||||||
timeconvert,
|
timeconvert,
|
||||||
|
traverse_obj,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
unified_timestamp,
|
unified_timestamp,
|
||||||
@@ -139,13 +142,13 @@ class TestUtil(unittest.TestCase):
|
|||||||
|
|
||||||
self.assertEqual(sanitize_filename('123'), '123')
|
self.assertEqual(sanitize_filename('123'), '123')
|
||||||
|
|
||||||
self.assertEqual('abc_de', sanitize_filename('abc/de'))
|
self.assertEqual('abc⧸de', sanitize_filename('abc/de'))
|
||||||
self.assertFalse('/' in sanitize_filename('abc/de///'))
|
self.assertFalse('/' in sanitize_filename('abc/de///'))
|
||||||
|
|
||||||
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
|
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', is_id=False))
|
||||||
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
|
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', is_id=False))
|
||||||
self.assertEqual('yes no', sanitize_filename('yes? no'))
|
self.assertEqual('yes no', sanitize_filename('yes? no', is_id=False))
|
||||||
self.assertEqual('this - that', sanitize_filename('this: that'))
|
self.assertEqual('this - that', sanitize_filename('this: that', is_id=False))
|
||||||
|
|
||||||
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
|
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
|
||||||
aumlaut = 'ä'
|
aumlaut = 'ä'
|
||||||
@@ -368,6 +371,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
         self.assertEqual(unified_strdate('1968 12 10'), '19681210')
         self.assertEqual(unified_strdate('1968-12-10'), '19681210')
+        self.assertEqual(unified_strdate('31-07-2022 20:00'), '20220731')
         self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
         self.assertEqual(
             unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
@@ -411,6 +415,10 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
         self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)

+        self.assertEqual(unified_timestamp('December 31 1969 20:00:01 EDT'), 1)
+        self.assertEqual(unified_timestamp('Wednesday 31 December 1969 18:01:26 MDT'), 86)
+        self.assertEqual(unified_timestamp('12/31/1969 20:01:18 EDT', False), 78)
+
     def test_determine_ext(self):
         self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
         self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
@@ -560,6 +568,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
         self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
         self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
+        self.assertEqual(base_url('http://foo.de/bar/baz&x=z&w=y/x/c'), 'http://foo.de/bar/baz&x=z&w=y/x/')

     def test_urljoin(self):
         self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
@@ -945,6 +954,85 @@ class TestUtil(unittest.TestCase):
         )
         self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')

+    def test_js_to_json_vars_strings(self):
+        self.assertDictEqual(
+            json.loads(js_to_json(
+                '''{
+                    'null': a,
+                    'nullStr': b,
+                    'true': c,
+                    'trueStr': d,
+                    'false': e,
+                    'falseStr': f,
+                    'unresolvedVar': g,
+                }''',
+                {
+                    'a': 'null',
+                    'b': '"null"',
+                    'c': 'true',
+                    'd': '"true"',
+                    'e': 'false',
+                    'f': '"false"',
+                    'g': 'var',
+                }
+            )),
+            {
+                'null': None,
+                'nullStr': 'null',
+                'true': True,
+                'trueStr': 'true',
+                'false': False,
+                'falseStr': 'false',
+                'unresolvedVar': 'var'
+            }
+        )
+
+        self.assertDictEqual(
+            json.loads(js_to_json(
+                '''{
+                    'int': a,
+                    'intStr': b,
+                    'float': c,
+                    'floatStr': d,
+                }''',
+                {
+                    'a': '123',
+                    'b': '"123"',
+                    'c': '1.23',
+                    'd': '"1.23"',
+                }
+            )),
+            {
+                'int': 123,
+                'intStr': '123',
+                'float': 1.23,
+                'floatStr': '1.23',
+            }
+        )
+
+        self.assertDictEqual(
+            json.loads(js_to_json(
+                '''{
+                    'object': a,
+                    'objectStr': b,
+                    'array': c,
+                    'arrayStr': d,
+                }''',
+                {
+                    'a': '{}',
+                    'b': '"{}"',
+                    'c': '[]',
+                    'd': '"[]"',
+                }
+            )),
+            {
+                'object': {},
+                'objectStr': '{}',
+                'array': [],
+                'arrayStr': '[]',
+            }
+        )
+
     def test_js_to_json_realworld(self):
         inp = '''{
             'clip':{'provider':'pseudo'}
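For context, a minimal sketch of the variable-substitution behavior pinned down by the new test above: the second argument to `js_to_json` maps variable names to raw JS tokens (the input object and values here are illustrative):

    import json

    from yt_dlp.utils import js_to_json

    # 'a' resolves to the JS token 123 (a number); 'b' to the token "123" (a string)
    converted = js_to_json("{'x': a, 'y': b}", {'a': '123', 'b': '"123"'})
    print(json.loads(converted))  # {'x': 123, 'y': '123'}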
@@ -1091,6 +1179,12 @@ class TestUtil(unittest.TestCase):
         on = js_to_json('[1,//{},\n2]')
         self.assertEqual(json.loads(on), [1, 2])

+        on = js_to_json(R'"\^\$\#"')
+        self.assertEqual(json.loads(on), R'^$#', msg='Unnecessary escapes should be stripped')
+
+        on = js_to_json('\'"\\""\'')
+        self.assertEqual(json.loads(on), '"""', msg='Unnecessary quote escape should be escaped')
+
     def test_js_to_json_malformed(self):
         self.assertEqual(js_to_json('42a1'), '42"a1"')
         self.assertEqual(js_to_json('42a-1'), '42"a"-1')
@@ -1670,6 +1764,9 @@ Line 1
         self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'foo', html)), [])
         self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'no-such-foo', html)), [])

+        self.assertEqual(list(get_elements_text_and_html_by_attribute(
+            'class', 'foo', '<a class="foo">nice</a><span class="foo">nice</span>', tag='a')), [('nice', '<a class="foo">nice</a>')])
+
     GET_ELEMENT_BY_TAG_TEST_STRING = '''
     random text lorem ipsum</p>
     <div>
@@ -1842,6 +1939,257 @@ Line 1
         self.assertEqual(determine_file_encoding('# coding: utf-32-be'.encode('utf-32-be')), ('utf-32-be', 0))
         self.assertEqual(determine_file_encoding('# coding: utf-16-le'.encode('utf-16-le')), ('utf-16-le', 0))

+    def test_get_compatible_ext(self):
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None, None], vexts=['mp4'], aexts=['m4a', 'm4a']), 'mkv')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None], vexts=['flv'], aexts=['flv']), 'flv')
+
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None], vexts=['mp4'], aexts=['m4a']), 'mp4')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None], vexts=['mp4'], aexts=['webm']), 'mkv')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['m4a']), 'mkv')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['webm']), 'webm')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['weba']), 'webm')
+
+        self.assertEqual(get_compatible_ext(
+            vcodecs=['h264'], acodecs=['mp4a'], vexts=['mov'], aexts=['m4a']), 'mp4')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=['av01.0.12M.08'], acodecs=['opus'], vexts=['mp4'], aexts=['webm']), 'webm')
+
+        self.assertEqual(get_compatible_ext(
+            vcodecs=['vp9'], acodecs=['opus'], vexts=['webm'], aexts=['webm'], preferences=['flv', 'mp4']), 'mp4')
+        self.assertEqual(get_compatible_ext(
+            vcodecs=['av1'], acodecs=['mp4a'], vexts=['webm'], aexts=['m4a'], preferences=('webm', 'mkv')), 'mkv')
+
+    def test_traverse_obj(self):
+        _TEST_DATA = {
+            100: 100,
+            1.2: 1.2,
+            'str': 'str',
+            'None': None,
+            '...': ...,
+            'urls': [
+                {'index': 0, 'url': 'https://www.example.com/0'},
+                {'index': 1, 'url': 'https://www.example.com/1'},
+            ],
+            'data': (
+                {'index': 2},
+                {'index': 3},
+            ),
+            'dict': {},
+        }
+
+        # Test base functionality
+        self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
+                         msg='allow tuple path')
+        self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str',
+                         msg='allow list path')
+        self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str',
+                         msg='allow iterable path')
+        self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str',
+                         msg='single items should be treated as a path')
+        self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA)
+        self.assertEqual(traverse_obj(_TEST_DATA, 100), 100)
+        self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2)
+
+        # Test Ellipsis behavior
+        self.assertCountEqual(traverse_obj(_TEST_DATA, ...),
+                              (item for item in _TEST_DATA.values() if item is not None),
+                              msg='`...` should give all values except `None`')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, ...)), _TEST_DATA['urls'][0].values(),
+                              msg='`...` selection for dicts should select all values')
+        self.assertEqual(traverse_obj(_TEST_DATA, (..., ..., 'url')),
+                         ['https://www.example.com/0', 'https://www.example.com/1'],
+                         msg='nested `...` queries should work')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, (..., ..., 'index')), range(4),
+                              msg='`...` query result should be flattened')
+
+        # Test function as key
+        self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
+                         [_TEST_DATA['urls']],
+                         msg='function as query key should perform a filter based on (key, value)')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), {'str'},
+                              msg='exceptions in the query function should be catched')
+
+        # Test alternative paths
+        self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
+                         msg='multiple `paths` should be treated as alternative paths')
+        self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str',
+                         msg='alternatives should exit early')
+        self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None,
+                         msg='alternatives should return `default` if exhausted')
+        self.assertEqual(traverse_obj(_TEST_DATA, (..., 'fail'), 100), 100,
+                         msg='alternatives should track their own branching return')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)), list(_TEST_DATA['data']),
+                         msg='alternatives on empty objects should search further')
+
+        # Test branch and path nesting
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'],
+                         msg='tuple as key should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'],
+                         msg='list as key should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'],
+                         msg='double nesting in path should be treated as paths')
+        self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1],
+                         msg='do not fail early on branching')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))),
+                              ['https://www.example.com/0', 'https://www.example.com/1'],
+                              msg='tripple nesting in path should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))),
+                         ['https://www.example.com/0', 'https://www.example.com/1'],
+                         msg='ellipsis as branch path start gets flattened')
+
+        # Test dictionary as key
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2},
+                         msg='dict key should result in a dict with the same keys')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}),
+                         {0: 'https://www.example.com/0'},
+                         msg='dict key should allow paths')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}),
+                         {0: ['https://www.example.com/0']},
+                         msg='tuple in dict path should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}),
+                         {0: ['https://www.example.com/0']},
+                         msg='double nesting in dict path should be treated as paths')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}),
+                         {0: ['https://www.example.com/1', 'https://www.example.com/0']},
+                         msg='tripple nesting in dict path should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
+                         msg='remove `None` values when dict key')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=...), {0: ...},
+                         msg='do not remove `None` values if `default`')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {0: {}},
+                         msg='do not remove empty values when dict key')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=...), {0: {}},
+                         msg='do not remove empty values when dict key and a default')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', ...)}), {0: []},
+                         msg='if branch in dict key not successful, return `[]`')
+
+        # Testing default parameter behavior
+        _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None,
+                         msg='default value should be `None`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...), ...,
+                         msg='chained fails should result in default')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0,
+                         msg='should not short cirquit on `None`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1,
+                         msg='invalid dict key should result in `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1,
+                         msg='`None` is a deliberate sentinel and should become `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None,
+                         msg='`IndexError` should result in `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1), 1,
+                         msg='if branched but not successful return `default` if defined, not `[]`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None), None,
+                         msg='if branched but not successful return `default` even if `default` is `None`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail')), [],
+                         msg='if branched but not successful return `[]`, not `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', ...)), [],
+                         msg='if branched but object is empty return `[]`, not `default`')
+
+        # Testing expected_type behavior
+        _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str), 'str',
+                         msg='accept matching `expected_type` type')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int), None,
+                         msg='reject non matching `expected_type` type')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)), '0',
+                         msg='transform type using type function')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str',
+                                      expected_type=lambda _: 1 / 0), None,
+                         msg='wrap expected_type fuction in try_call')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str), ['str'],
+                         msg='eliminate items that expected_type fails on')
+
+        # Test get_all behavior
+        _GET_ALL_DATA = {'key': [0, 1, 2]}
+        self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False), 0,
+                         msg='if not `get_all`, return only first matching value')
+        self.assertEqual(traverse_obj(_GET_ALL_DATA, ..., get_all=False), [0, 1, 2],
+                         msg='do not overflatten if not `get_all`')
+
+        # Test casesense behavior
+        _CASESENSE_DATA = {
+            'KeY': 'value0',
+            0: {
+                'KeY': 'value1',
+                0: {'KeY': 'value2'},
+            },
+        }
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None,
+                         msg='dict keys should be case sensitive unless `casesense`')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY',
+                                      casesense=False), 'value0',
+                         msg='allow non matching key case if `casesense`')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)),
+                                      casesense=False), ['value1'],
+                         msg='allow non matching key case in branch if `casesense`')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)),
+                                      casesense=False), ['value2'],
+                         msg='allow non matching key case in branch path if `casesense`')
+
+        # Test traverse_string behavior
+        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None,
+                         msg='do not traverse into string if not `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0),
+                                      traverse_string=True), 's',
+                         msg='traverse into string if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1),
+                                      traverse_string=True), '.',
+                         msg='traverse into converted data if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...),
+                                      traverse_string=True), list('str'),
+                         msg='`...` branching into string should result in list')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)),
+                                      traverse_string=True), ['s', 'r'],
+                         msg='branching into string should result in list')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda _, x: x),
+                                      traverse_string=True), list('str'),
+                         msg='function branching into string should result in list')
+
+        # Test is_user_input behavior
+        _IS_USER_INPUT_DATA = {'range8': list(range(8))}
+        self.assertEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', '3'),
+                                      is_user_input=True), 3,
+                         msg='allow for string indexing if `is_user_input`')
+        self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', '3:'),
+                                           is_user_input=True), tuple(range(8))[3:],
+                              msg='allow for string slice if `is_user_input`')
+        self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':4:2'),
+                                           is_user_input=True), tuple(range(8))[:4:2],
+                              msg='allow step in string slice if `is_user_input`')
+        self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':'),
+                                           is_user_input=True), range(8),
+                              msg='`:` should be treated as `...` if `is_user_input`')
+        with self.assertRaises(TypeError, msg='too many params should result in error'):
+            traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':::'), is_user_input=True)
+
+        # Test re.Match as input obj
+        mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
+        self.assertEqual(traverse_obj(mobj, ...), [x for x in mobj.groups() if x is not None],
+                         msg='`...` on a `re.Match` should give its `groups()`')
+        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 2)), ['0123', '3'],
+                         msg='function on a `re.Match` should give groupno, value starting at 0')
+        self.assertEqual(traverse_obj(mobj, 'group'), '3',
+                         msg='str key on a `re.Match` should give group with that name')
+        self.assertEqual(traverse_obj(mobj, 2), '3',
+                         msg='int key on a `re.Match` should give group with that name')
+        self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3',
+                         msg='str key on a `re.Match` should respect casesense')
+        self.assertEqual(traverse_obj(mobj, 'fail'), None,
+                         msg='failing str key on a `re.Match` should return `default`')
+        self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None,
+                         msg='failing str key on a `re.Match` should return `default`')
+        self.assertEqual(traverse_obj(mobj, 8), None,
+                         msg='failing int key on a `re.Match` should return `default`')
+

 if __name__ == '__main__':
     unittest.main()
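A short usage sketch of the `traverse_obj` semantics pinned down by the tests above (the `info` dict is illustrative):

    from yt_dlp.utils import traverse_obj

    info = {'formats': [{'url': 'https://www.example.com/0', 'height': 720},
                        {'url': 'https://www.example.com/1'}]}

    traverse_obj(info, ('formats', 0, 'url'))       # 'https://www.example.com/0'
    traverse_obj(info, ('formats', ..., 'height'))  # [720] -- failed branches are dropped
    traverse_obj(info, 'missing', default='n/a')    # 'n/a'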
@@ -10,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from test.helper import FakeYDL, is_download_test
 from yt_dlp.extractor import YoutubeIE, YoutubeTabIE
+from yt_dlp.utils import ExtractorError


 @is_download_test
@@ -53,6 +54,18 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertEqual(video['duration'], 10)
         self.assertEqual(video['uploader'], 'Philipp Hagemeister')

+    def test_youtube_channel_no_uploads(self):
+        dl = FakeYDL()
+        dl.params['extract_flat'] = True
+        ie = YoutubeTabIE(dl)
+        # no uploads
+        with self.assertRaisesRegex(ExtractorError, r'no uploads'):
+            ie.extract('https://www.youtube.com/channel/UC2yXPzFejc422buOIzn_0CA')
+
+        # no uploads and no UCID given
+        with self.assertRaisesRegex(ExtractorError, r'no uploads'):
+            ie.extract('https://www.youtube.com/news')
+

 if __name__ == '__main__':
     unittest.main()
@@ -94,6 +94,46 @@ _NSIG_TESTS = [
         'https://www.youtube.com/s/player/5dd88d1d/player-plasma-ias-phone-en_US.vflset/base.js',
         'kSxKFLeqzv_ZyHSAt', 'n8gS8oRlHOxPFA',
     ),
+    (
+        'https://www.youtube.com/s/player/324f67b9/player_ias.vflset/en_US/base.js',
+        'xdftNy7dh9QGnhW', '22qLGxrmX8F1rA',
+    ),
+    (
+        'https://www.youtube.com/s/player/4c3f79c5/player_ias.vflset/en_US/base.js',
+        'TDCstCG66tEAO5pR9o', 'dbxNtZ14c-yWyw',
+    ),
+    (
+        'https://www.youtube.com/s/player/c81bbb4a/player_ias.vflset/en_US/base.js',
+        'gre3EcLurNY2vqp94', 'Z9DfGxWP115WTg',
+    ),
+    (
+        'https://www.youtube.com/s/player/1f7d5369/player_ias.vflset/en_US/base.js',
+        'batNX7sYqIJdkJ', 'IhOkL_zxbkOZBw',
+    ),
+    (
+        'https://www.youtube.com/s/player/009f1d77/player_ias.vflset/en_US/base.js',
+        '5dwFHw8aFWQUQtffRq', 'audescmLUzI3jw',
+    ),
+    (
+        'https://www.youtube.com/s/player/dc0c6770/player_ias.vflset/en_US/base.js',
+        '5EHDMgYLV6HPGk_Mu-kk', 'n9lUJLHbxUI0GQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/113ca41c/player_ias.vflset/en_US/base.js',
+        'cgYl-tlYkhjT7A', 'hI7BBr2zUgcmMg',
+    ),
+    (
+        'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js',
+        'M92UUMHa8PdvPd3wyM', '3hPqLJsiNZx7yA',
+    ),
+    (
+        'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js',
+        'B2j7f_UPT4rfje85Lu_e', 'm5DmNymaGQ5RdQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/7a062b77/player_ias.vflset/en_US/base.js',
+        'NRcE3y3mVtm_cV-W', 'VbsCYUATvqlt5w',
+    ),
 ]


@@ -101,6 +141,7 @@ _NSIG_TESTS = [
 class TestPlayerInfo(unittest.TestCase):
     def test_youtube_extract_player_info(self):
         PLAYER_URLS = (
+            ('https://www.youtube.com/s/player/4c3f79c5/player_ias.vflset/en_US/base.js', '4c3f79c5'),
             ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
             ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'),
             ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
test/testdata/ism/ec-3_test.Manifest (vendored, new file, 1 line)
@@ -0,0 +1 @@
+<?xml version="1.0" encoding="utf-8"?><!--Transformed by VSMT using XSL stylesheet for rule Identity--><!-- Created with Unified Streaming Platform (version=1.10.12-18737) --><SmoothStreamingMedia MajorVersion="2" MinorVersion="0" TimeScale="10000000" Duration="370000000"><StreamIndex Type="audio" QualityLevels="1" TimeScale="10000000" Language="deu" Name="audio_deu" Chunks="19" Url="QualityLevels({bitrate})/Fragments(audio_deu={start time})?noStreamProfile=1"><QualityLevel Index="0" Bitrate="127802" CodecPrivateData="1190" SamplingRate="48000" Channels="2" BitsPerSample="16" PacketSize="4" AudioTag="255" FourCC="AACL" /><c t="0" d="20053333" /><c d="20053334" /><c d="20053333" /><c d="19840000" /><c d="20053333" /><c d="20053334" /><c d="20053333" /><c d="19840000" /><c d="20053333" /><c d="20053334" /><c d="20053333" /><c d="19840000" /><c d="20053333" /><c d="20053334" /><c d="20053333" /><c d="19840000" /><c d="20053333" /><c d="20053334" /><c d="7253333" /></StreamIndex><StreamIndex Type="audio" QualityLevels="1" TimeScale="10000000" Language="deu" Name="audio_deu_1" Chunks="19" Url="QualityLevels({bitrate})/Fragments(audio_deu_1={start time})?noStreamProfile=1"><QualityLevel Index="0" Bitrate="224000" CodecPrivateData="00063F000000AF87FBA7022DFB42A4D405CD93843BDD0700200F00" FourCCData="0700200F00" SamplingRate="48000" Channels="6" BitsPerSample="16" PacketSize="896" AudioTag="65534" FourCC="EC-3" /><c t="0" d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="20160000" /><c d="19840000" /><c d="8320000" /></StreamIndex><StreamIndex Type="video" QualityLevels="8" TimeScale="10000000" Language="deu" Name="video_deu" Chunks="19" Url="QualityLevels({bitrate})/Fragments(video_deu={start time})?noStreamProfile=1" MaxWidth="1920" MaxHeight="1080" DisplayWidth="1920" DisplayHeight="1080"><QualityLevel Index="0" Bitrate="23909" CodecPrivateData="000000016742C00CDB06077E5C05A808080A00000300020000030009C0C02EE0177CC6300F142AE00000000168CA8DC8" MaxWidth="384" MaxHeight="216" FourCC="AVC1" /><QualityLevel Index="1" Bitrate="403188" CodecPrivateData="00000001674D4014E98323B602D4040405000003000100000300320F1429380000000168EAECF2" MaxWidth="400" MaxHeight="224" FourCC="AVC1" /><QualityLevel Index="2" Bitrate="680365" CodecPrivateData="00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2" MaxWidth="640" MaxHeight="360" FourCC="AVC1" /><QualityLevel Index="3" Bitrate="1253465" CodecPrivateData="00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2" MaxWidth="640" MaxHeight="360" FourCC="AVC1" /><QualityLevel Index="4" Bitrate="2121558" CodecPrivateData="00000001674D401EECA0601BD80B50101014000003000400000300C83C58B6580000000168E93B3C80" MaxWidth="768" MaxHeight="432" FourCC="AVC1" /><QualityLevel Index="5" Bitrate="3275545" CodecPrivateData="00000001674D4020ECA02802DD80B501010140000003004000000C83C60C65800000000168E93B3C80" MaxWidth="1280" MaxHeight="720" FourCC="AVC1" /><QualityLevel Index="6" Bitrate="5300196" CodecPrivateData="00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80" MaxWidth="1920" MaxHeight="1080" FourCC="AVC1" /><QualityLevel Index="7" Bitrate="8079312" CodecPrivateData="00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80" MaxWidth="1920" MaxHeight="1080" FourCC="AVC1" /><c t="0" d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="20000000" /><c d="10000000" /></StreamIndex></SmoothStreamingMedia>
test/testdata/yt_dlp_plugins/extractor/_ignore.py (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+from yt_dlp.extractor.common import InfoExtractor
+
+
+class IgnorePluginIE(InfoExtractor):
+    pass
test/testdata/yt_dlp_plugins/extractor/ignore.py (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
+from yt_dlp.extractor.common import InfoExtractor
+
+
+class IgnoreNotInAllPluginIE(InfoExtractor):
+    pass
+
+
+class InAllPluginIE(InfoExtractor):
+    pass
+
+
+__all__ = ['InAllPluginIE']
test/testdata/yt_dlp_plugins/extractor/normal.py (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
+from yt_dlp.extractor.common import InfoExtractor
+
+
+class NormalPluginIE(InfoExtractor):
+    pass
+
+
+class _IgnoreUnderscorePluginIE(InfoExtractor):
+    pass
test/testdata/yt_dlp_plugins/postprocessor/normal.py (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+from yt_dlp.postprocessor.common import PostProcessor
+
+
+class NormalPluginPP(PostProcessor):
+    pass
test/testdata/zipped_plugins/yt_dlp_plugins/extractor/zipped.py (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+from yt_dlp.extractor.common import InfoExtractor
+
+
+class ZippedPluginIE(InfoExtractor):
+    pass
test/testdata/zipped_plugins/yt_dlp_plugins/postprocessor/zipped.py (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+from yt_dlp.postprocessor.common import PostProcessor
+
+
+class ZippedPluginPP(PostProcessor):
+    pass
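Taken together, these fixtures suggest the plugin-loading convention under test: modules inside a `yt_dlp_plugins/<type>/` package (plain or zipped), public names respecting `__all__`, and underscore-prefixed modules or classes skipped. A hypothetical user plugin following that shape:

    # yt_dlp_plugins/extractor/example.py  (module and class names are illustrative)
    from yt_dlp.extractor.common import InfoExtractor


    class ExamplePluginIE(InfoExtractor):  # public class: picked up by the loader
        pass


    class _ExampleHelperIE(InfoExtractor):  # leading underscore: not exported
        pass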
File diff suppressed because it is too large
@@ -1,4 +1,8 @@
-f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp'  # noqa: F541
+try:
+    import contextvars  # noqa: F401
+except Exception:
+    raise Exception(
+        f'You are using an unsupported version of Python. Only Python versions 3.7 and above are supported by yt-dlp')  # noqa: F541

 __license__ = 'Public Domain'

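Both versions of this guard abuse the parser as a version probe; a paraphrase of the mechanism, hedged since it depends on stdlib history rather than anything yt-dlp-specific:

    # `contextvars` entered the stdlib in Python 3.7, so on 3.6 this import
    # raises and the message below is shown; on interpreters older than 3.6,
    # the f-string in the message is itself a SyntaxError, which is also what
    # the previous bare f-string at module top relied on
    try:
        import contextvars  # noqa: F401
    except Exception:
        raise Exception('yt-dlp requires Python 3.7+')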
@@ -12,14 +16,14 @@ import sys

 from .compat import compat_shlex_quote
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
-from .downloader import FileDownloader
 from .downloader.external import get_external_downloader
 from .extractor import list_extractor_classes
 from .extractor.adobepass import MSO_INFO
-from .extractor.common import InfoExtractor
 from .options import parseOpts
 from .postprocessor import (
     FFmpegExtractAudioPP,
+    FFmpegMergerPP,
+    FFmpegPostProcessor,
     FFmpegSubtitlesConvertorPP,
     FFmpegThumbnailsConvertorPP,
     FFmpegVideoConvertorPP,
@@ -34,6 +38,7 @@ from .utils import (
     DateRange,
     DownloadCancelled,
     DownloadError,
+    FormatSorter,
     GeoUtils,
     PlaylistEntries,
     SameFileError,
@@ -44,6 +49,7 @@ from .utils import (
     format_field,
     int_or_none,
     match_filter_func,
+    parse_bytes,
     parse_duration,
     preferredencoding,
     read_batch_urls,
@@ -57,6 +63,8 @@ from .utils import (
 )
 from .YoutubeDL import YoutubeDL

+_IN_CLI = False
+

 def _exit(status=0, *args):
     for msg in args:
@@ -83,12 +91,11 @@ def get_urls(urls, batchfile, verbose):


 def print_extractor_information(opts, urls):
-    # Importing GenericIE is currently slow since it imports other extractors
-    # TODO: Move this back to module level after generalization of embed detection
-    from .extractor.generic import GenericIE
-
     out = ''
     if opts.list_extractors:
+        # Importing GenericIE is currently slow since it imports YoutubeIE
+        from .extractor.generic import GenericIE
+
         urls = dict.fromkeys(urls, False)
         for ie in list_extractor_classes(opts.age_limit):
             out += ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n'
@@ -144,7 +151,7 @@ def set_compat_opts(opts):
     else:
         opts.embed_infojson = False
     if 'format-sort' in opts.compat_opts:
-        opts.format_sort.extend(InfoExtractor.FormatSort.ytdl_default)
+        opts.format_sort.extend(FormatSorter.ytdl_default)
     _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)
     _audio_multistreams_set = set_default_compat('multistreams', 'allow_multiple_audio_streams', False, remove_compat=False)
     if _video_multistreams_set is False and _audio_multistreams_set is False:
@@ -219,9 +226,11 @@ def validate_options(opts):

     # Format sort
     for f in opts.format_sort:
-        validate_regex('format sorting', f, InfoExtractor.FormatSort.regex)
+        validate_regex('format sorting', f, FormatSorter.regex)

     # Postprocessor formats
+    validate_regex('merge output format', opts.merge_output_format,
+                   r'({0})(/({0}))*'.format('|'.join(map(re.escape, FFmpegMergerPP.SUPPORTED_EXTS))))
     validate_regex('audio format', opts.audioformat, FFmpegExtractAudioPP.FORMAT_RE)
     validate_in('subtitle format', opts.convertsubtitles, FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS)
     validate_regex('thumbnail format', opts.convertthumbnails, FFmpegThumbnailsConvertorPP.FORMAT_RE)
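The merge-output-format pattern accepts a '/'-separated preference list of extensions. A quick standalone check of the constructed regex, using an illustrative subset in place of FFmpegMergerPP.SUPPORTED_EXTS:

    import re

    SUPPORTED_EXTS = ('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm')  # illustrative subset
    pattern = r'({0})(/({0}))*'.format('|'.join(map(re.escape, SUPPORTED_EXTS)))

    print(bool(re.fullmatch(pattern, 'mkv')))       # True
    print(bool(re.fullmatch(pattern, 'webm/mp4')))  # True
    print(bool(re.fullmatch(pattern, 'exe')))       # False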
@@ -271,19 +280,19 @@ def validate_options(opts):
             raise ValueError(f'invalid {key} retry sleep expression {expr!r}')

     # Bytes
-    def parse_bytes(name, value):
+    def validate_bytes(name, value):
         if value is None:
             return None
-        numeric_limit = FileDownloader.parse_bytes(value)
+        numeric_limit = parse_bytes(value)
         validate(numeric_limit is not None, 'rate limit', value)
         return numeric_limit

-    opts.ratelimit = parse_bytes('rate limit', opts.ratelimit)
-    opts.throttledratelimit = parse_bytes('throttled rate limit', opts.throttledratelimit)
-    opts.min_filesize = parse_bytes('min filesize', opts.min_filesize)
-    opts.max_filesize = parse_bytes('max filesize', opts.max_filesize)
-    opts.buffersize = parse_bytes('buffer size', opts.buffersize)
-    opts.http_chunk_size = parse_bytes('http chunk size', opts.http_chunk_size)
+    opts.ratelimit = validate_bytes('rate limit', opts.ratelimit)
+    opts.throttledratelimit = validate_bytes('throttled rate limit', opts.throttledratelimit)
+    opts.min_filesize = validate_bytes('min filesize', opts.min_filesize)
+    opts.max_filesize = validate_bytes('max filesize', opts.max_filesize)
+    opts.buffersize = validate_bytes('buffer size', opts.buffersize)
+    opts.http_chunk_size = validate_bytes('http chunk size', opts.http_chunk_size)

     # Output templates
     def validate_outtmpl(tmpl, msg):
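`parse_bytes` (now imported from utils rather than reached through FileDownloader) parses a number with an optional binary-multiplier suffix; assuming the semantics it inherited from the downloader, illustrative values:

    from yt_dlp.utils import parse_bytes

    parse_bytes('500K')  # 512000 (500 * 1024)
    parse_bytes('1.5M')  # 1572864
    parse_bytes('50G')   # 53687091200
    parse_bytes('oops')  # None -- which validate_bytes() then rejects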
@@ -316,14 +325,15 @@ def validate_options(opts):

     def parse_chapters(name, value):
         chapters, ranges = [], []
+        parse_timestamp = lambda x: float('inf') if x in ('inf', 'infinite') else parse_duration(x)
         for regex in value or []:
             if regex.startswith('*'):
-                for range in regex[1:].split(','):
-                    dur = tuple(map(parse_duration, range.strip().split('-')))
-                    if len(dur) == 2 and all(t is not None for t in dur):
-                        ranges.append(dur)
-                    else:
-                        raise ValueError(f'invalid {name} time range "{regex}". Must be of the form *start-end')
+                for range_ in map(str.strip, regex[1:].split(',')):
+                    mobj = range_ != '-' and re.fullmatch(r'([^-]+)?\s*-\s*([^-]+)?', range_)
+                    dur = mobj and (parse_timestamp(mobj.group(1) or '0'), parse_timestamp(mobj.group(2) or 'inf'))
+                    if None in (dur or [None]):
+                        raise ValueError(f'invalid {name} time range "{regex}". Must be of the form "*start-end"')
+                    ranges.append(dur)
                 continue
             try:
                 chapters.append(re.compile(regex))
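The reworked parser accepts open-ended and infinite ranges in addition to plain `*start-end`. A standalone sketch of the matching logic, substituting float() for parse_duration for brevity:

    import re

    parse_timestamp = lambda x: float('inf') if x in ('inf', 'infinite') else float(x)

    for range_ in map(str.strip, '10-15, 300-inf, -30'.split(',')):
        mobj = re.fullmatch(r'([^-]+)?\s*-\s*([^-]+)?', range_)
        # an omitted start defaults to 0, an omitted end to infinity
        print(parse_timestamp(mobj.group(1) or '0'), parse_timestamp(mobj.group(2) or 'inf'))
    # 10.0 15.0 / 300.0 inf / 0.0 30.0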
@@ -336,10 +346,16 @@ def validate_options(opts):

     # Cookies from browser
     if opts.cookiesfrombrowser:
-        mobj = re.match(r'(?P<name>[^+:]+)(\s*\+\s*(?P<keyring>[^:]+))?(\s*:(?P<profile>.+))?', opts.cookiesfrombrowser)
+        container = None
+        mobj = re.fullmatch(r'''(?x)
+            (?P<name>[^+:]+)
+            (?:\s*\+\s*(?P<keyring>[^:]+))?
+            (?:\s*:\s*(?!:)(?P<profile>.+?))?
+            (?:\s*::\s*(?P<container>.+))?
+        ''', opts.cookiesfrombrowser)
         if mobj is None:
             raise ValueError(f'invalid cookies from browser arguments: {opts.cookiesfrombrowser}')
-        browser_name, keyring, profile = mobj.group('name', 'keyring', 'profile')
+        browser_name, keyring, profile, container = mobj.group('name', 'keyring', 'profile', 'container')
         browser_name = browser_name.lower()
         if browser_name not in SUPPORTED_BROWSERS:
             raise ValueError(f'unsupported browser specified for cookies: "{browser_name}". '
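A quick demo of what the extended syntax parses into — the new `::container` suffix rides after the existing `browser[+keyring][:profile]` form (the argument string below is illustrative):

    import re

    mobj = re.fullmatch(r'''(?x)
        (?P<name>[^+:]+)
        (?:\s*\+\s*(?P<keyring>[^:]+))?
        (?:\s*:\s*(?!:)(?P<profile>.+?))?
        (?:\s*::\s*(?P<container>.+))?
    ''', 'firefox+gnomekeyring:myprofile::work')
    print(mobj.group('name', 'keyring', 'profile', 'container'))
    # ('firefox', 'gnomekeyring', 'myprofile', 'work')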
@@ -349,7 +365,7 @@ def validate_options(opts):
         if keyring not in SUPPORTED_KEYRINGS:
             raise ValueError(f'unsupported keyring specified for cookies: "{keyring}". '
                              f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}')
-        opts.cookiesfrombrowser = (browser_name, profile, keyring)
+        opts.cookiesfrombrowser = (browser_name, profile, keyring, container)

     # MetadataParser
     def metadataparser_actions(f):
@@ -370,10 +386,12 @@ def validate_options(opts):
             raise ValueError(f'{cmd} is invalid; {err}')
         yield action

-    parse_metadata = opts.parse_metadata or []
     if opts.metafromtitle is not None:
-        parse_metadata.append('title:%s' % opts.metafromtitle)
-    opts.parse_metadata = list(itertools.chain(*map(metadataparser_actions, parse_metadata)))
+        opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle)
+    opts.parse_metadata = {
+        k: list(itertools.chain(*map(metadataparser_actions, v)))
+        for k, v in opts.parse_metadata.items()
+    }

     # Other options
     if opts.playlist_items is not None:
@@ -394,6 +412,9 @@ def validate_options(opts):
     if opts.download_archive is not None:
         opts.download_archive = expand_path(opts.download_archive)

+    if opts.ffmpeg_location is not None:
+        opts.ffmpeg_location = expand_path(opts.ffmpeg_location)
+
     if opts.user_agent is not None:
         opts.headers.setdefault('User-Agent', opts.user_agent)
     if opts.referer is not None:
@@ -469,7 +490,7 @@ def validate_options(opts):
                     val1=opts.sponskrub and opts.sponskrub_cut)

     # Conflicts with --allow-unplayable-formats
-    report_conflict('--add-metadata', 'addmetadata')
+    report_conflict('--embed-metadata', 'addmetadata')
     report_conflict('--embed-chapters', 'addchapters')
     report_conflict('--embed-info-json', 'embed_infojson')
     report_conflict('--embed-subs', 'embedsubtitles')
@@ -542,11 +563,11 @@ def validate_options(opts):
 def get_postprocessors(opts):
     yield from opts.add_postprocessors

-    if opts.parse_metadata:
+    for when, actions in opts.parse_metadata.items():
         yield {
             'key': 'MetadataParser',
-            'actions': opts.parse_metadata,
-            'when': 'pre_process'
+            'actions': actions,
+            'when': when
         }
     sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
     if sponsorblock_query:
@@ -682,7 +703,7 @@ def parse_options(argv=None):

     postprocessors = list(get_postprocessors(opts))

-    print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[2:])
+    print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
     any_getting = any(getattr(opts, k) for k in (
         'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
         'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
@@ -758,6 +779,7 @@ def parse_options(argv=None):
         'windowsfilenames': opts.windowsfilenames,
         'ignoreerrors': opts.ignoreerrors,
         'force_generic_extractor': opts.force_generic_extractor,
+        'allowed_extractors': opts.allowed_extractors or ['default'],
         'ratelimit': opts.ratelimit,
         'throttledratelimit': opts.throttledratelimit,
         'overwrites': opts.overwrites,
@@ -833,6 +855,7 @@ def parse_options(argv=None):
         'legacyserverconnect': opts.legacy_server_connect,
         'nocheckcertificate': opts.no_check_certificate,
         'prefer_insecure': opts.prefer_insecure,
+        'enable_file_urls': opts.enable_file_urls,
         'http_headers': opts.headers,
         'proxy': opts.proxy,
         'socket_timeout': opts.socket_timeout,
@@ -899,6 +922,11 @@ def _real_main(argv=None):
     if print_extractor_information(opts, all_urls):
         return

+    # We may need ffmpeg_location without having access to the YoutubeDL instance
+    # See https://github.com/yt-dlp/yt-dlp/issues/2191
+    if opts.ffmpeg_location:
+        FFmpegPostProcessor._ffmpeg_location.set(opts.ffmpeg_location)
+
     with YoutubeDL(ydl_opts) as ydl:
         pre_process = opts.update_self or opts.rm_cachedir
         actual_use = all_urls or opts.load_info_filename
@@ -936,6 +964,8 @@ def _real_main(argv=None):


 def main(argv=None):
+    global _IN_CLI
+    _IN_CLI = True
     try:
         _exit(*variadic(_real_main(argv)))
     except DownloadError:
@@ -5,7 +5,7 @@

 import sys

-if __package__ is None and not hasattr(sys, 'frozen'):
+if __package__ is None and not getattr(sys, 'frozen', False):
     # direct call of __main__.py
     import os.path
     path = os.path.realpath(os.path.abspath(__file__))
@@ -28,11 +28,23 @@ def aes_cbc_encrypt_bytes(data, key, iv, **kwargs):
     return intlist_to_bytes(aes_cbc_encrypt(*map(bytes_to_intlist, (data, key, iv)), **kwargs))


+BLOCK_SIZE_BYTES = 16
+
+
 def unpad_pkcs7(data):
     return data[:-compat_ord(data[-1])]


-BLOCK_SIZE_BYTES = 16
+def pkcs7_padding(data):
+    """
+    PKCS#7 padding
+
+    @param {int[]} data cleartext
+    @returns {int[]} padding data
+    """
+
+    remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
+    return data + [remaining_length] * remaining_length


 def pad_block(block, padding_mode):
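A worked example of the new helper, on int lists as used throughout yt_dlp.aes:

    from yt_dlp.aes import pkcs7_padding

    print(pkcs7_padding(list(b'yt-dlp')))
    # [121, 116, 45, 100, 108, 112, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
    # 16 - (6 % 16) = 10, so ten bytes of value 10 fill out the block;
    # an input that is an exact multiple of 16 gains a whole block of value-16 bytes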
@@ -64,7 +76,7 @@ def pad_block(block, padding_mode):

 def aes_ecb_encrypt(data, key, iv=None):
     """
-    Encrypt with aes in ECB mode
+    Encrypt with aes in ECB mode. Using PKCS#7 padding

     @param {int[]} data cleartext
     @param {int[]} key 16/24/32-Byte cipher key
@@ -77,8 +89,7 @@ def aes_ecb_encrypt(data, key, iv=None):
     encrypted_data = []
     for i in range(block_count):
         block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
-        encrypted_data += aes_encrypt(block, expanded_key)
-    encrypted_data = encrypted_data[:len(data)]
+        encrypted_data += aes_encrypt(pkcs7_padding(block), expanded_key)

     return encrypted_data

@@ -551,5 +562,6 @@ __all__ = [

     'key_expansion',
     'pad_block',
+    'pkcs7_padding',
     'unpad_pkcs7',
 ]
@@ -5,8 +5,10 @@ import os
 import re
 import shutil
 import traceback
+import urllib.parse

-from .utils import expand_path, write_json_file
+from .utils import expand_path, traverse_obj, version_tuple, write_json_file
+from .version import __version__


 class Cache:
@@ -21,11 +23,9 @@ class Cache:
         return expand_path(res)

     def _get_cache_fn(self, section, key, dtype):
-        assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
-            'invalid section %r' % section
-        assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
-        return os.path.join(
-            self._get_root_dir(), section, f'{key}.{dtype}')
+        assert re.match(r'^[\w.-]+$', section), f'invalid section {section!r}'
+        key = urllib.parse.quote(key, safe='').replace('%', ',')  # encode non-ascii characters
+        return os.path.join(self._get_root_dir(), section, f'{key}.{dtype}')

     @property
     def enabled(self):
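With the key assert gone, effectively any string is now a legal cache key; a quick illustration of the encoding (the key value is illustrative):

    import urllib.parse

    print(urllib.parse.quote('player/así', safe='').replace('%', ','))
    # -> 'player,2Fas,C3,AD' -- separators and non-ASCII bytes are
    # percent-encoded, then '%' is swapped for ',' to stay filename-safe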
@@ -45,12 +45,20 @@ class Cache:
                 if ose.errno != errno.EEXIST:
                     raise
             self._ydl.write_debug(f'Saving {section}.{key} to cache')
-            write_json_file(data, fn)
+            write_json_file({'yt-dlp_version': __version__, 'data': data}, fn)
         except Exception:
             tb = traceback.format_exc()
             self._ydl.report_warning(f'Writing cache to {fn!r} failed: {tb}')

-    def load(self, section, key, dtype='json', default=None):
+    def _validate(self, data, min_ver):
+        version = traverse_obj(data, 'yt-dlp_version')
+        if not version:  # Backward compatibility
+            data, version = {'data': data}, '2022.08.19'
+        if not min_ver or version_tuple(version) >= version_tuple(min_ver):
+            return data['data']
+        self._ydl.write_debug(f'Discarding old cache from version {version} (needs {min_ver})')
+
+    def load(self, section, key, dtype='json', default=None, *, min_ver=None):
         assert dtype in ('json',)

         if not self.enabled:
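A sketch of the resulting on-disk envelope and the new min_ver gate (section, key and version strings below are illustrative):

    # store() now wraps the payload:
    #     {'yt-dlp_version': '2023.01.02', 'data': <payload>}
    # load() can then invalidate stale entries:
    value = cache.load('some-section', 'some-key', min_ver='2022.09.01')
    # pre-envelope files are treated as version '2022.08.19'; anything below
    # min_ver is discarded with a debug message and the default is returned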
@@ -61,8 +69,8 @@ class Cache:
         try:
             with open(cache_fn, encoding='utf-8') as cachef:
                 self._ydl.write_debug(f'Loading {section}.{key} from cache')
-                return json.load(cachef)
-        except ValueError:
+                return self._validate(json.load(cachef), min_ver)
+        except (ValueError, KeyError):
             try:
                 file_size = os.path.getsize(cache_fn)
             except OSError as oe:
@@ -3,19 +3,18 @@ import sys
 import warnings
 import xml.etree.ElementTree as etree

-from . import re
 from ._deprecated import *  # noqa: F401, F403
 from .compat_utils import passthrough_module

 # XXX: Implement this the same way as other DeprecationWarnings without circular import
 passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn(
-    DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=2))
+    DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=3))


 # HTMLParseError has been deprecated in Python 3.3 and removed in
 # Python 3.5. Introducing dummy exception for Python >3.5 for compatible
 # and uniform cross-version exception handling
-class compat_HTMLParseError(Exception):
+class compat_HTMLParseError(ValueError):
     pass

@@ -33,6 +32,7 @@ compat_os_name = os._name if os.name == 'java' else os.name
|
|||||||
|
|
||||||
if compat_os_name == 'nt':
|
if compat_os_name == 'nt':
|
||||||
def compat_shlex_quote(s):
|
def compat_shlex_quote(s):
|
||||||
|
import re
|
||||||
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
|
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
|
||||||
else:
|
else:
|
||||||
from shlex import quote as compat_shlex_quote # noqa: F401
|
from shlex import quote as compat_shlex_quote # noqa: F401
|
||||||
|
|||||||
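With `stacklevel=3`, the DeprecationWarning emitted by the passthrough callback points at the user's access site rather than at the passthrough machinery. A rough sketch of the observable behaviour (assuming `compat_str` is still provided via `_legacy`):

    import warnings
    from yt_dlp import compat

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        compat.compat_str  # attribute access resolved through passthrough_module
    # -> DeprecationWarning('yt_dlp.compat.compat_str is deprecated'),
    #    now attributed one frame higher than before
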
yt_dlp/compat/_legacy.py
@@ -22,10 +22,14 @@ import urllib.request
 import xml.etree.ElementTree as etree
 from subprocess import DEVNULL

-from .compat_utils import passthrough_module  # isort: split
-from .asyncio import run as compat_asyncio_run  # noqa: F401
-from .re import Pattern as compat_Pattern  # noqa: F401
-from .re import match as compat_Match  # noqa: F401
+# isort: split
+import asyncio  # noqa: F401
+import re  # noqa: F401
+from asyncio import run as compat_asyncio_run  # noqa: F401
+from re import Pattern as compat_Pattern  # noqa: F401
+from re import match as compat_Match  # noqa: F401
+
+from .compat_utils import passthrough_module
 from ..dependencies import Cryptodome_AES as compat_pycrypto_AES  # noqa: F401
 from ..dependencies import brotli as compat_brotli  # noqa: F401
 from ..dependencies import websockets as compat_websockets  # noqa: F401
@@ -44,6 +48,7 @@ def compat_setenv(key, value, env=os.environ):


 compat_basestring = str
+compat_casefold = str.casefold
 compat_chr = chr
 compat_collections_abc = collections.abc
 compat_cookiejar = http.cookiejar
yt_dlp/compat/asyncio.py (deleted)
@@ -1,23 +0,0 @@
-# flake8: noqa: F405
-from asyncio import *  # noqa: F403
-
-from .compat_utils import passthrough_module
-
-passthrough_module(__name__, 'asyncio')
-del passthrough_module
-
-try:
-    run  # >= 3.7
-except NameError:
-    def run(coro):
-        try:
-            loop = get_event_loop()
-        except RuntimeError:
-            loop = new_event_loop()
-            set_event_loop(loop)
-        loop.run_until_complete(coro)
-
-try:
-    all_tasks  # >= 3.7
-except NameError:
-    all_tasks = Task.all_tasks
yt_dlp/compat/imghdr.py
@@ -2,13 +2,15 @@ tests = {
     'webp': lambda h: h[0:4] == b'RIFF' and h[8:] == b'WEBP',
     'png': lambda h: h[:8] == b'\211PNG\r\n\032\n',
     'jpeg': lambda h: h[6:10] in (b'JFIF', b'Exif'),
+    'gif': lambda h: h[:6] in (b'GIF87a', b'GIF89a'),
 }


-def what(path):
-    """Detect format of image (Currently supports jpeg, png, webp only)
+def what(file=None, h=None):
+    """Detect format of image (Currently supports jpeg, png, webp, gif only)
     Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py
     """
-    with open(path, 'rb') as f:
-        head = f.read(12)
-        return next((type_ for type_, test in tests.items() if test(head)), None)
+    if h is None:
+        with open(file, 'rb') as f:
+            h = f.read(12)
+    return next((type_ for type_, test in tests.items() if test(h)), None)
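`what()` keeps its path form but can now skip the file read when the caller already holds the header bytes; a quick sketch of both call styles:

    # h-form: pass the first bytes directly (12 bytes are enough for every test)
    what(h=b'GIF89a' + bytes(6))             # -> 'gif'
    what(h=b'\211PNG\r\n\032\n' + bytes(4))  # -> 'png'
    # file-form: unchanged behaviour, reads the first 12 bytes itself
    what('thumbnail.webp')
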
yt_dlp/compat/re.py (deleted)
@@ -1,18 +0,0 @@
-# flake8: noqa: F405
-from re import *  # F403
-
-from .compat_utils import passthrough_module
-
-passthrough_module(__name__, 're')
-del passthrough_module
-
-try:
-    Pattern  # >= 3.7
-except NameError:
-    Pattern = type(compile(''))
-
-
-try:
-    Match  # >= 3.7
-except NameError:
-    Match = type(compile('').match(''))
yt_dlp/compat/shutil.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+# flake8: noqa: F405
+from shutil import *  # noqa: F403
+
+from .compat_utils import passthrough_module
+
+passthrough_module(__name__, 'shutil')
+del passthrough_module
+
+
+import sys
+
+if sys.platform.startswith('freebsd'):
+    import errno
+    import os
+    import shutil
+
+    # Workaround for PermissionError when using restricted ACL mode on FreeBSD
+    def copy2(src, dst, *args, **kwargs):
+        if os.path.isdir(dst):
+            dst = os.path.join(dst, os.path.basename(src))
+        shutil.copyfile(src, dst, *args, **kwargs)
+        try:
+            shutil.copystat(src, dst, *args, **kwargs)
+        except PermissionError as e:
+            if e.errno != getattr(errno, 'EPERM', None):
+                raise
+        return dst
+
+    def move(*args, copy_function=copy2, **kwargs):
+        return shutil.move(*args, copy_function=copy_function, **kwargs)
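The new module is a drop-in for the stdlib: every name passes through unchanged except on FreeBSD, where `move()` uses the `copy2` above so that `copystat()` failing with EPERM under restricted ACLs no longer aborts the move. A sketch (paths are illustrative):

    from yt_dlp.compat import shutil  # instead of plain `import shutil`

    # Same API as the stdlib; only the FreeBSD copy_function differs
    shutil.move('/tmp/video.part', '/mnt/acl-restricted/video.mp4')
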
yt_dlp/cookies.py
@@ -1,9 +1,10 @@
 import base64
 import contextlib
-import ctypes
 import http.cookiejar
+import http.cookies
 import json
 import os
+import re
 import shutil
 import struct
 import subprocess
@@ -25,7 +26,14 @@ from .dependencies import (
     sqlite3,
 )
 from .minicurses import MultilinePrinter, QuietMultilinePrinter
-from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path
+from .utils import (
+    Popen,
+    YoutubeDLCookieJar,
+    error_to_str,
+    expand_path,
+    is_path_like,
+    try_call,
+)

 CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
 SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
@@ -86,11 +94,12 @@ def _create_progress_bar(logger):
 def load_cookies(cookie_file, browser_specification, ydl):
     cookie_jars = []
     if browser_specification is not None:
-        browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
-        cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
+        browser_name, profile, keyring, container = _parse_browser_specification(*browser_specification)
+        cookie_jars.append(
+            extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring, container=container))

     if cookie_file is not None:
-        is_filename = YoutubeDLCookieJar.is_path(cookie_file)
+        is_filename = is_path_like(cookie_file)
         if is_filename:
             cookie_file = expand_path(cookie_file)

@@ -102,9 +111,9 @@ def load_cookies(cookie_file, browser_specification, ydl):
     return _merge_cookie_jars(cookie_jars)


-def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
+def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None, container=None):
     if browser_name == 'firefox':
-        return _extract_firefox_cookies(profile, logger)
+        return _extract_firefox_cookies(profile, container, logger)
     elif browser_name == 'safari':
         return _extract_safari_cookies(profile, logger)
     elif browser_name in CHROMIUM_BASED_BROWSERS:
@@ -113,7 +122,7 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(),
     raise ValueError(f'unknown browser: {browser_name}')


-def _extract_firefox_cookies(profile, logger):
+def _extract_firefox_cookies(profile, container, logger):
     logger.info('Extracting cookies from firefox')
     if not sqlite3:
         logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
@@ -132,11 +141,36 @@ def _extract_firefox_cookies(profile, logger):
         raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
     logger.debug(f'Extracting cookies from: "{cookie_database_path}"')

+    container_id = None
+    if container not in (None, 'none'):
+        containers_path = os.path.join(os.path.dirname(cookie_database_path), 'containers.json')
+        if not os.path.isfile(containers_path) or not os.access(containers_path, os.R_OK):
+            raise FileNotFoundError(f'could not read containers.json in {search_root}')
+        with open(containers_path) as containers:
+            identities = json.load(containers).get('identities', [])
+        container_id = next((context.get('userContextId') for context in identities if container in (
+            context.get('name'),
+            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group())
+        )), None)
+        if not isinstance(container_id, int):
+            raise ValueError(f'could not find firefox container "{container}" in containers.json')
+
     with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
         cursor = None
         try:
             cursor = _open_database_copy(cookie_database_path, tmpdir)
-            cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
+            if isinstance(container_id, int):
+                logger.debug(
+                    f'Only loading cookies from firefox container "{container}", ID {container_id}')
+                cursor.execute(
+                    'SELECT host, name, value, path, expiry, isSecure FROM moz_cookies WHERE originAttributes LIKE ? OR originAttributes LIKE ?',
+                    (f'%userContextId={container_id}', f'%userContextId={container_id}&%'))
+            elif container == 'none':
+                logger.debug('Only loading cookies not belonging to any container')
+                cursor.execute(
+                    'SELECT host, name, value, path, expiry, isSecure FROM moz_cookies WHERE NOT INSTR(originAttributes,"userContextId=")')
+            else:
+                cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
             jar = YoutubeDLCookieJar()
             with _create_progress_bar(logger) as progress_bar:
                 table = cursor.fetchall()
@@ -811,12 +845,15 @@ def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
 def _get_mac_keyring_password(browser_keyring_name, logger):
     logger.debug('using find-generic-password to obtain password from OSX keychain')
     try:
-        stdout, _, _ = Popen.run(
+        stdout, _, returncode = Popen.run(
             ['security', 'find-generic-password',
              '-w',  # write password to stdout
              '-a', browser_keyring_name,  # match 'account'
              '-s', f'{browser_keyring_name} Safe Storage'],  # match 'service'
            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+        if returncode:
+            logger.warning('find-generic-password failed')
+            return None
         return stdout.rstrip(b'\n')
     except Exception as e:
         logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
@@ -876,10 +913,12 @@ def _decrypt_windows_dpapi(ciphertext, logger):
     References:
         - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
     """
-    from ctypes.wintypes import DWORD
+
+    import ctypes
+    import ctypes.wintypes

     class DATA_BLOB(ctypes.Structure):
-        _fields_ = [('cbData', DWORD),
+        _fields_ = [('cbData', ctypes.wintypes.DWORD),
                     ('pbData', ctypes.POINTER(ctypes.c_char))]

     buffer = ctypes.create_string_buffer(ciphertext)
@@ -947,11 +986,102 @@ def _is_path(value):
     return os.path.sep in value


-def _parse_browser_specification(browser_name, profile=None, keyring=None):
+def _parse_browser_specification(browser_name, profile=None, keyring=None, container=None):
     if browser_name not in SUPPORTED_BROWSERS:
         raise ValueError(f'unsupported browser: "{browser_name}"')
     if keyring not in (None, *SUPPORTED_KEYRINGS):
         raise ValueError(f'unsupported keyring: "{keyring}"')
-    if profile is not None and _is_path(profile):
-        profile = os.path.expanduser(profile)
-    return browser_name, profile, keyring
+    if profile is not None and _is_path(expand_path(profile)):
+        profile = expand_path(profile)
+    return browser_name, profile, keyring, container
+
+
+class LenientSimpleCookie(http.cookies.SimpleCookie):
+    """More lenient version of http.cookies.SimpleCookie"""
+    # From https://github.com/python/cpython/blob/v3.10.7/Lib/http/cookies.py
+    # We use Morsel's legal key chars to avoid errors on setting values
+    _LEGAL_KEY_CHARS = r'\w\d' + re.escape('!#$%&\'*+-.:^_`|~')
+    _LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}')
+
+    _RESERVED = {
+        "expires",
+        "path",
+        "comment",
+        "domain",
+        "max-age",
+        "secure",
+        "httponly",
+        "version",
+        "samesite",
+    }
+
+    _FLAGS = {"secure", "httponly"}
+
+    # Added 'bad' group to catch the remaining value
+    _COOKIE_PATTERN = re.compile(r"""
+        \s*                            # Optional whitespace at start of cookie
+        (?P<key>                       # Start of group 'key'
+        [""" + _LEGAL_KEY_CHARS + r"""]+?# Any word of at least one letter
+        )                              # End of group 'key'
+        (                              # Optional group: there may not be a value.
+        \s*=\s*                          # Equal Sign
+        (                                # Start of potential value
+        (?P<val>                         # Start of group 'val'
+        "(?:[^\\"]|\\.)*"                  # Any doublequoted string
+        |                                  # or
+        \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
+        |                                  # or
+        [""" + _LEGAL_VALUE_CHARS + r"""]*   # Any word or empty string
+        )                                # End of group 'val'
+        |                                # or
+        (?P<bad>(?:\\;|[^;])*?)          # 'bad' group fallback for invalid values
+        )                                # End of potential value
+        )?                             # End of optional value group
+        \s*                            # Any number of spaces.
+        (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
+        """, re.ASCII | re.VERBOSE)
+
+    def load(self, data):
+        # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776
+        if not isinstance(data, str):
+            return super().load(data)
+
+        morsel = None
+        for match in self._COOKIE_PATTERN.finditer(data):
+            if match.group('bad'):
+                morsel = None
+                continue
+
+            key, value = match.group('key', 'val')
+
+            is_attribute = False
+            if key.startswith('$'):
+                key = key[1:]
+                is_attribute = True
+
+            lower_key = key.lower()
+            if lower_key in self._RESERVED:
+                if morsel is None:
+                    continue
+
+                if value is None:
+                    if lower_key not in self._FLAGS:
+                        morsel = None
+                        continue
+                    value = True
+                else:
+                    value, _ = self.value_decode(value)
+
+                morsel[key] = value
+
+            elif is_attribute:
+                morsel = None
+
+            elif value is not None:
+                morsel = self.get(key, http.cookies.Morsel())
+                real_value, coded_value = self.value_decode(value)
+                morsel.set(key, real_value, coded_value)
+                self[key] = morsel
+
+            else:
+                morsel = None
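The added 'bad' group is what makes the parser lenient: a value the grammar cannot match invalidates only its own morsel, and `finditer` resumes at the next cookie instead of raising. A small sketch of the effect (cookie names are illustrative):

    jar = LenientSimpleCookie()
    jar.load('good=value1; broken=ille\x00gal; good2=value2')
    sorted(jar)  # -> ['good', 'good2']; the malformed pair is dropped, parsing continues
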
yt_dlp/dependencies.py
@@ -28,7 +28,7 @@ try:
 except ImportError:
     try:
         from Crypto.Cipher import AES as Cryptodome_AES
-    except ImportError:
+    except (ImportError, SyntaxError):  # Old Crypto gives SyntaxError in newer Python
         Cryptodome_AES = None
     else:
         try:
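The widened except clause is the general defensive-import pattern: any failure mode of a legacy package degrades to a `None` sentinel that callers can test, instead of crashing import of yt-dlp itself. The same pattern, sketched standalone:

    try:
        from Cryptodome.Cipher import AES as Cryptodome_AES
    except ImportError:
        try:
            from Crypto.Cipher import AES as Cryptodome_AES
        except (ImportError, SyntaxError):  # old PyCrypto is not valid source on newer Pythons
            Cryptodome_AES = None
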
yt_dlp/downloader/common.py
@@ -1,5 +1,6 @@
 import contextlib
 import errno
+import functools
 import os
 import random
 import re
@@ -12,16 +13,19 @@ from ..minicurses import (
     QuietMultilinePrinter,
 )
 from ..utils import (
-    NUMBER_RE,
+    IDENTITY,
+    NO_DEFAULT,
     LockingUnsupportedError,
     Namespace,
+    RetryManager,
     classproperty,
     decodeArgument,
+    deprecation_warning,
     encodeFilename,
-    error_to_compat_str,
-    float_or_none,
     format_bytes,
     join_nonempty,
+    parse_bytes,
+    remove_start,
     sanitize_open,
     shell_quote,
     timeconvert,
@@ -90,6 +94,7 @@ class FileDownloader:

         for func in (
             'deprecation_warning',
+            'deprecated_feature',
             'report_error',
             'report_file_already_downloaded',
             'report_warning',
@@ -117,11 +122,11 @@ class FileDownloader:
         time = timetuple_from_msec(seconds * 1000)
         if time.hours > 99:
             return '--:--:--'
-        if not time.hours:
-            return '%02d:%02d' % time[1:-1]
         return '%02d:%02d:%02d' % time[:-1]

-    format_eta = format_seconds
+    @classmethod
+    def format_eta(cls, seconds):
+        return f'{remove_start(cls.format_seconds(seconds), "00:"):>8s}'

     @staticmethod
     def calc_percent(byte_counter, data_len):
@@ -176,12 +181,9 @@ class FileDownloader:
     @staticmethod
     def parse_bytes(bytestr):
         """Parse a string indicating a byte quantity into an integer."""
-        matchobj = re.match(rf'(?i)^({NUMBER_RE})([kMGTPEZY]?)$', bytestr)
-        if matchobj is None:
-            return None
-        number = float(matchobj.group(1))
-        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
-        return int(round(number * multiplier))
+        deprecation_warning('yt_dlp.FileDownloader.parse_bytes is deprecated and '
+                            'may be removed in the future. Use yt_dlp.utils.parse_bytes instead')
+        return parse_bytes(bytestr)

     def slow_down(self, start_time, now, byte_counter):
         """Sleep if the download speed is over the rate limit."""
@@ -215,27 +217,24 @@ class FileDownloader:
         return filename + '.ytdl'

     def wrap_file_access(action, *, fatal=False):
-        def outer(func):
-            def inner(self, *args, **kwargs):
-                file_access_retries = self.params.get('file_access_retries', 0)
-                retry = 0
-                while True:
-                    try:
-                        return func(self, *args, **kwargs)
-                    except OSError as err:
-                        retry = retry + 1
-                        if retry > file_access_retries or err.errno not in (errno.EACCES, errno.EINVAL):
-                            if not fatal:
-                                self.report_error(f'unable to {action} file: {err}')
-                                return
-                            raise
-                        self.to_screen(
-                            f'[download] Unable to {action} file due to file access error. '
-                            f'Retrying (attempt {retry} of {self.format_retries(file_access_retries)}) ...')
-                        if not self.sleep_retry('file_access', retry):
-                            time.sleep(0.01)
-            return inner
-        return outer
+        def error_callback(err, count, retries, *, fd):
+            return RetryManager.report_retry(
+                err, count, retries, info=fd.__to_screen,
+                warn=lambda e: (time.sleep(0.01), fd.to_screen(f'[download] Unable to {action} file: {e}')),
+                error=None if fatal else lambda e: fd.report_error(f'Unable to {action} file: {e}'),
+                sleep_func=fd.params.get('retry_sleep_functions', {}).get('file_access'))
+
+        def wrapper(self, func, *args, **kwargs):
+            for retry in RetryManager(self.params.get('file_access_retries'), error_callback, fd=self):
+                try:
+                    return func(self, *args, **kwargs)
+                except OSError as err:
+                    if err.errno in (errno.EACCES, errno.EINVAL):
+                        retry.error = err
+                        continue
+                    retry.error_callback(err, 1, 0)
+
+        return functools.partial(functools.partialmethod, wrapper)

     @wrap_file_access('open', fatal=True)
     def sanitize_open(self, filename, open_mode):
@@ -332,11 +331,16 @@ class FileDownloader:
                     return tmpl
             return default

+        _format_bytes = lambda k: f'{format_bytes(s.get(k)):>10s}'
+
         if s['status'] == 'finished':
             if self.params.get('noprogress'):
                 self.to_screen('[download] Download completed')
+            speed = try_call(lambda: s['total_bytes'] / s['elapsed'])
             s.update({
-                '_total_bytes_str': format_bytes(s.get('total_bytes')),
+                'speed': speed,
+                '_speed_str': self.format_speed(speed).strip(),
+                '_total_bytes_str': _format_bytes('total_bytes'),
                 '_elapsed_str': self.format_seconds(s.get('elapsed')),
                 '_percent_str': self.format_percent(100),
             })
@@ -344,21 +348,22 @@ class FileDownloader:
                 '100%%',
                 with_fields(('total_bytes', 'of %(_total_bytes_str)s')),
                 with_fields(('elapsed', 'in %(_elapsed_str)s')),
+                with_fields(('speed', 'at %(_speed_str)s')),
                 delim=' '))

         if s['status'] != 'downloading':
             return

         s.update({
-            '_eta_str': self.format_eta(s.get('eta')),
+            '_eta_str': self.format_eta(s.get('eta')).strip(),
             '_speed_str': self.format_speed(s.get('speed')),
             '_percent_str': self.format_percent(try_call(
                 lambda: 100 * s['downloaded_bytes'] / s['total_bytes'],
                 lambda: 100 * s['downloaded_bytes'] / s['total_bytes_estimate'],
                 lambda: s['downloaded_bytes'] == 0 and 0)),
-            '_total_bytes_str': format_bytes(s.get('total_bytes')),
-            '_total_bytes_estimate_str': format_bytes(s.get('total_bytes_estimate')),
-            '_downloaded_bytes_str': format_bytes(s.get('downloaded_bytes')),
+            '_total_bytes_str': _format_bytes('total_bytes'),
+            '_total_bytes_estimate_str': _format_bytes('total_bytes_estimate'),
+            '_downloaded_bytes_str': _format_bytes('downloaded_bytes'),
             '_elapsed_str': self.format_seconds(s.get('elapsed')),
         })

@@ -378,25 +383,20 @@ class FileDownloader:
         """Report attempt to resume at given byte."""
         self.to_screen('[download] Resuming download at byte %s' % resume_len)

-    def report_retry(self, err, count, retries):
-        """Report retry in case of HTTP error 5xx"""
-        self.__to_screen(
-            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
-            % (error_to_compat_str(err), count, self.format_retries(retries)))
-        self.sleep_retry('http', count)
+    def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
+        """Report retry"""
+        is_frag = False if frag_index is NO_DEFAULT else 'fragment'
+        RetryManager.report_retry(
+            err, count, retries, info=self.__to_screen,
+            warn=lambda msg: self.__to_screen(f'[download] Got error: {msg}'),
+            error=IDENTITY if not fatal else lambda e: self.report_error(f'\r[download] Got error: {e}'),
+            sleep_func=self.params.get('retry_sleep_functions', {}).get(is_frag or 'http'),
+            suffix=f'fragment{"s" if frag_index is None else f" {frag_index}"}' if is_frag else None)

     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
         self.to_screen('[download] Unable to resume')

-    def sleep_retry(self, retry_type, count):
-        sleep_func = self.params.get('retry_sleep_functions', {}).get(retry_type)
-        delay = float_or_none(sleep_func(n=count - 1)) if sleep_func else None
-        if delay:
-            self.__to_screen(f'Sleeping {delay:.2f} seconds ...')
-            time.sleep(delay)
-        return sleep_func is not None
-
     @staticmethod
     def supports_manifest(manifest):
         """ Whether the downloader can download the fragments from the manifest.
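Both rewritten helpers route their policy through `RetryManager` and the `retry_sleep_functions` parameter, so backoff now lives in one place. A sketch of the knobs involved (the values are illustrative):

    ydl_opts = {
        'file_access_retries': 3,
        'fragment_retries': 10,
        'retry_sleep_functions': {
            # each receives n (the attempt number) and returns seconds to sleep
            'file_access': lambda n: 0.01 * n,
            'http': lambda n: 2 ** n,
            'fragment': lambda n: min(5 * n, 30),
        },
    }
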
yt_dlp/downloader/dash.py
@@ -1,8 +1,9 @@
 import time
+import urllib.parse

 from . import get_suitable_downloader
 from .fragment import FragmentFD
-from ..utils import urljoin
+from ..utils import update_url_query, urljoin


 class DashSegmentsFD(FragmentFD):
@@ -40,7 +41,12 @@ class DashSegmentsFD(FragmentFD):
             self._prepare_and_start_frag_download(ctx, fmt)
             ctx['start'] = real_start

-            fragments_to_download = self._get_fragments(fmt, ctx)
+            extra_query = None
+            extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
+            if extra_param_to_segment_url:
+                extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
+
+            fragments_to_download = self._get_fragments(fmt, ctx, extra_query)

             if real_downloader:
                 self.to_screen(
@@ -51,13 +57,13 @@ class DashSegmentsFD(FragmentFD):

             args.append([ctx, fragments_to_download, fmt])

-        return self.download_and_append_fragments_multiple(*args)
+        return self.download_and_append_fragments_multiple(*args, is_fatal=lambda idx: idx == 0)

     def _resolve_fragments(self, fragments, ctx):
         fragments = fragments(ctx) if callable(fragments) else fragments
         return [next(iter(fragments))] if self.params.get('test') else fragments

-    def _get_fragments(self, fmt, ctx):
+    def _get_fragments(self, fmt, ctx, extra_query):
         fragment_base_url = fmt.get('fragment_base_url')
         fragments = self._resolve_fragments(fmt['fragments'], ctx)

@@ -70,6 +76,8 @@ class DashSegmentsFD(FragmentFD):
             if not fragment_url:
                 assert fragment_base_url
                 fragment_url = urljoin(fragment_base_url, fragment['path'])
+            if extra_query:
+                fragment_url = update_url_query(fragment_url, extra_query)

             yield {
                 'frag_index': frag_index,
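`extra_param_to_segment_url` is a plain query string that now gets merged into every fragment URL; the transformation is just `parse_qs` plus `update_url_query`. A sketch (URL and token are invented for illustration):

    import urllib.parse
    from yt_dlp.utils import update_url_query

    extra_query = urllib.parse.parse_qs('token=abc&expires=1700000000')
    update_url_query('https://cdn.example.com/seg-0001.m4s', extra_query)
    # -> 'https://cdn.example.com/seg-0001.m4s?token=abc&expires=1700000000'
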
yt_dlp/downloader/external.py
@@ -1,15 +1,18 @@
 import enum
+import json
 import os.path
 import re
 import subprocess
 import sys
 import time
+import uuid

 from .fragment import FragmentFD
 from ..compat import functools
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
     Popen,
+    RetryManager,
     _configuration_args,
     check_executable,
     classproperty,
@@ -19,8 +22,10 @@ from ..utils import (
     determine_ext,
     encodeArgument,
     encodeFilename,
+    find_available_port,
     handle_youtubedl_headers,
     remove_end,
+    sanitized_Request,
     traverse_obj,
 )

@@ -59,7 +64,6 @@ class ExternalFD(FragmentFD):
         }
         if filename != '-':
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(f'\r[{self.get_basename()}] Downloaded {fsize} bytes')
             self.try_rename(tmpfilename, filename)
             status.update({
                 'downloaded_bytes': fsize,
@@ -128,35 +132,27 @@ class ExternalFD(FragmentFD):
         self._debug_cmd(cmd)

         if 'fragments' not in info_dict:
-            _, stderr, returncode = Popen.run(
-                cmd, text=True, stderr=subprocess.PIPE if self._CAPTURE_STDERR else None)
+            _, stderr, returncode = self._call_process(cmd, info_dict)
             if returncode and stderr:
                 self.to_stderr(stderr)
             return returncode

-        fragment_retries = self.params.get('fragment_retries', 0)
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

-        count = 0
-        while count <= fragment_retries:
-            _, stderr, returncode = Popen.run(cmd, text=True, stderr=subprocess.PIPE)
+        retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry,
+                                     frag_index=None, fatal=not skip_unavailable_fragments)
+        for retry in retry_manager:
+            _, stderr, returncode = self._call_process(cmd, info_dict)
             if not returncode:
                 break

             # TODO: Decide whether to retry based on error code
             # https://aria2.github.io/manual/en/html/aria2c.html#exit-status
             if stderr:
                 self.to_stderr(stderr)
-            count += 1
-            if count <= fragment_retries:
-                self.to_screen(
-                    '[%s] Got error. Retrying fragments (attempt %d of %s)...'
-                    % (self.get_basename(), count, self.format_retries(fragment_retries)))
-                self.sleep_retry('fragment', count)
-        if count > fragment_retries:
-            if not skip_unavailable_fragments:
-                self.report_error('Giving up after %s fragment retries' % fragment_retries)
-                return -1
+            retry.error = Exception()
+            continue
+        if not skip_unavailable_fragments and retry_manager.error:
+            return -1

         decrypt_fragment = self.decrypter(info_dict)
         dest, _ = self.sanitize_open(tmpfilename, 'wb')
@@ -178,6 +174,9 @@ class ExternalFD(FragmentFD):
             self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
         return 0

+    def _call_process(self, cmd, info_dict):
+        return Popen.run(cmd, text=True, stderr=subprocess.PIPE)
+

 class CurlFD(ExternalFD):
     AVAILABLE_OPT = '-V'
@@ -258,6 +257,18 @@ class Aria2cFD(ExternalFD):
         check_results = (not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES)
         return all(check_results)

+    @staticmethod
+    def _aria2c_filename(fn):
+        return fn if os.path.isabs(fn) else f'.{os.path.sep}{fn}'
+
+    def _call_downloader(self, tmpfilename, info_dict):
+        if 'no-external-downloader-progress' not in self.params.get('compat_opts', []):
+            info_dict['__rpc'] = {
+                'port': find_available_port() or 19190,
+                'secret': str(uuid.uuid4()),
+            }
+        return super()._call_downloader(tmpfilename, info_dict)
+
     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-c',
                '--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
@@ -278,6 +289,12 @@ class Aria2cFD(ExternalFD):
         cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
         cmd += self._configuration_args()

+        if '__rpc' in info_dict:
+            cmd += [
+                '--enable-rpc',
+                f'--rpc-listen-port={info_dict["__rpc"]["port"]}',
+                f'--rpc-secret={info_dict["__rpc"]["secret"]}']
+
         # aria2c strips out spaces from the beginning/end of filenames and paths.
         # We work around this issue by adding a "./" to the beginning of the
         # filename and relative path, and adding a "/" at the end of the path.
@@ -286,11 +303,9 @@ class Aria2cFD(ExternalFD):
         # https://github.com/aria2/aria2/issues/1373
         dn = os.path.dirname(tmpfilename)
         if dn:
-            if not os.path.isabs(dn):
-                dn = f'.{os.path.sep}{dn}'
-            cmd += ['--dir', dn + os.path.sep]
+            cmd += ['--dir', self._aria2c_filename(dn) + os.path.sep]
+
         if 'fragments' not in info_dict:
-            cmd += ['--out', f'.{os.path.sep}{os.path.basename(tmpfilename)}']
+            cmd += ['--out', self._aria2c_filename(os.path.basename(tmpfilename))]
         cmd += ['--auto-file-renaming=false']

         if 'fragments' in info_dict:
@@ -299,15 +314,97 @@ class Aria2cFD(ExternalFD):
             url_list = []
             for frag_index, fragment in enumerate(info_dict['fragments']):
                 fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
-                url_list.append('%s\n\tout=%s' % (fragment['url'], fragment_filename))
+                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
             stream, _ = self.sanitize_open(url_list_file, 'wb')
             stream.write('\n'.join(url_list).encode())
             stream.close()
-            cmd += ['-i', url_list_file]
+            cmd += ['-i', self._aria2c_filename(url_list_file)]
         else:
             cmd += ['--', info_dict['url']]
         return cmd

+    def aria2c_rpc(self, rpc_port, rpc_secret, method, params=()):
+        # Does not actually need to be UUID, just unique
+        sanitycheck = str(uuid.uuid4())
+        d = json.dumps({
+            'jsonrpc': '2.0',
+            'id': sanitycheck,
+            'method': method,
+            'params': [f'token:{rpc_secret}', *params],
+        }).encode('utf-8')
+        request = sanitized_Request(
+            f'http://localhost:{rpc_port}/jsonrpc',
+            data=d, headers={
+                'Content-Type': 'application/json',
+                'Content-Length': f'{len(d)}',
+                'Ytdl-request-proxy': '__noproxy__',
+            })
+        with self.ydl.urlopen(request) as r:
+            resp = json.load(r)
+        assert resp.get('id') == sanitycheck, 'Something went wrong with RPC server'
+        return resp['result']
+
+    def _call_process(self, cmd, info_dict):
+        if '__rpc' not in info_dict:
+            return super()._call_process(cmd, info_dict)
+
+        send_rpc = functools.partial(self.aria2c_rpc, info_dict['__rpc']['port'], info_dict['__rpc']['secret'])
+        started = time.time()
+
+        fragmented = 'fragments' in info_dict
+        frag_count = len(info_dict['fragments']) if fragmented else 1
+        status = {
+            'filename': info_dict.get('_filename'),
+            'status': 'downloading',
+            'elapsed': 0,
+            'downloaded_bytes': 0,
+            'fragment_count': frag_count if fragmented else None,
+            'fragment_index': 0 if fragmented else None,
+        }
+        self._hook_progress(status, info_dict)
+
+        def get_stat(key, *obj, average=False):
+            val = tuple(filter(None, map(float, traverse_obj(obj, (..., ..., key))))) or [0]
+            return sum(val) / (len(val) if average else 1)
+
+        with Popen(cmd, text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE) as p:
+            # Add a small sleep so that RPC client can receive response,
+            # or the connection stalls infinitely
+            time.sleep(0.2)
+            retval = p.poll()
+            while retval is None:
+                # We don't use tellStatus as we won't know the GID without reading stdout
+                # Ref: https://aria2.github.io/manual/en/html/aria2c.html#aria2.tellActive
+                active = send_rpc('aria2.tellActive')
+                completed = send_rpc('aria2.tellStopped', [0, frag_count])
+
+                downloaded = get_stat('totalLength', completed) + get_stat('completedLength', active)
+                speed = get_stat('downloadSpeed', active)
+                total = frag_count * get_stat('totalLength', active, completed, average=True)
+                if total < downloaded:
+                    total = None
+
+                status.update({
+                    'downloaded_bytes': int(downloaded),
+                    'speed': speed,
+                    'total_bytes': None if fragmented else total,
+                    'total_bytes_estimate': total,
+                    'eta': (total - downloaded) / (speed or 1),
+                    'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
+                    'elapsed': time.time() - started
+                })
+                self._hook_progress(status, info_dict)
+
+                if not active and len(completed) >= frag_count:
+                    send_rpc('aria2.shutdown')
+                    retval = p.wait()
+                    break
+
+                time.sleep(0.1)
+                retval = p.poll()
+
+        return '', p.stderr.read(), retval
+

 class HttpieFD(ExternalFD):
     AVAILABLE_OPT = '--version'
@@ -346,7 +443,6 @@ class FFmpegFD(ExternalFD):
                 and cls.can_download(info_dict))

     def _call_downloader(self, tmpfilename, info_dict):
-        urls = [f['url'] for f in info_dict.get('requested_formats', [])] or [info_dict['url']]
         ffpp = FFmpegPostProcessor(downloader=self)
         if not ffpp.available:
             self.report_error('m3u8 download detected but ffmpeg could not be found. Please install')
@@ -376,16 +472,6 @@ class FFmpegFD(ExternalFD):
             # http://trac.ffmpeg.org/ticket/6125#comment:10
             args += ['-seekable', '1' if seekable else '0']

-        http_headers = None
-        if info_dict.get('http_headers'):
-            youtubedl_headers = handle_youtubedl_headers(info_dict['http_headers'])
-            http_headers = [
-                # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
-                # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
-                '-headers',
-                ''.join(f'{key}: {val}\r\n' for key, val in youtubedl_headers.items())
-            ]
-
         env = None
         proxy = self.params.get('proxy')
         if proxy:
@@ -438,21 +524,26 @@ class FFmpegFD(ExternalFD):

         start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end')

-        for i, url in enumerate(urls):
-            if http_headers is not None and re.match(r'^https?://', url):
-                args += http_headers
+        selected_formats = info_dict.get('requested_formats') or [info_dict]
+        for i, fmt in enumerate(selected_formats):
+            if fmt.get('http_headers') and re.match(r'^https?://', fmt['url']):
+                headers_dict = handle_youtubedl_headers(fmt['http_headers'])
+                # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
+                # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+                args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in headers_dict.items())])
+
             if start_time:
                 args += ['-ss', str(start_time)]
             if end_time:
                 args += ['-t', str(end_time - start_time)]

-            args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', url]
+            args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]

         if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
             args += ['-c', 'copy']

         if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
-            for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
+            for i, fmt in enumerate(selected_formats):
                 stream_number = fmt.get('manifest_stream_number', 0)
                 args.extend(['-map', f'{i}:{stream_number}'])

@@ -492,8 +583,9 @@ class FFmpegFD(ExternalFD):
         args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
         self._debug_cmd(args)

+        piped = any(fmt['url'] in ('-', 'pipe:') for fmt in selected_formats)
         with Popen(args, stdin=subprocess.PIPE, env=env) as proc:
-            if url in ('-', 'pipe:'):
+            if piped:
                 self.on_process_started(proc, proc.stdin)
             try:
                 retval = proc.wait()
@@ -503,7 +595,7 @@ class FFmpegFD(ExternalFD):
                 # produces a file that is playable (this is mostly useful for live
                 # streams). Note that Windows is not affected and produces playable
                 # files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
-                if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and url not in ('-', 'pipe:'):
+                if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and not piped:
                     proc.communicate_or_kill(b'q')
                 else:
                     proc.kill(timeout=None)
@@ -521,16 +613,14 @@ _BY_NAME = {
     if name.endswith('FD') and name not in ('ExternalFD', 'FragmentFD')
 }

-_BY_EXE = {klass.EXE_NAME: klass for klass in _BY_NAME.values()}
-

 def list_external_downloaders():
     return sorted(_BY_NAME.keys())


 def get_external_downloader(external_downloader):
-    """ Given the name of the executable, see whether we support the given
-        downloader . """
-    # Drop .exe extension on Windows
+    """ Given the name of the executable, see whether we support the given downloader """
     bn = os.path.splitext(os.path.basename(external_downloader))[0]
-    return _BY_NAME.get(bn, _BY_EXE.get(bn))
+    return _BY_NAME.get(bn) or next((
+        klass for klass in _BY_NAME.values() if klass.EXE_NAME in bn
+    ), None)
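`aria2c_rpc` above is ordinary aria2 JSON-RPC over HTTP; an equivalent raw call, sketched with the stdlib (port and secret are illustrative, the real values come from `info_dict['__rpc']`):

    import json
    import urllib.request

    payload = json.dumps({
        'jsonrpc': '2.0',
        'id': 'sanity-check-1',
        'method': 'aria2.tellActive',
        'params': ['token:SECRET'],
    }).encode()
    request = urllib.request.Request(
        'http://localhost:19190/jsonrpc', data=payload,
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(request) as r:
        print(json.load(r)['result'])  # active downloads, with completedLength/downloadSpeed
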
yt_dlp/downloader/f4m.py
@@ -184,7 +184,7 @@ def build_fragments_list(boot_info):
     first_frag_number = fragment_run_entry_table[0]['first']
     fragments_counter = itertools.count(first_frag_number)
     for segment, fragments_count in segment_run_table['segment_run']:
-        # In some live HDS streams (for example Rai), `fragments_count` is
+        # In some live HDS streams (e.g. Rai), `fragments_count` is
         # abnormal and causing out-of-memory errors. It's OK to change the
         # number of fragments for live streams as they are updated periodically
         if fragments_count == 4294967295 and boot_info['live']:
@@ -424,6 +424,4 @@ class F4mFD(FragmentFD):
                     msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                     self.report_warning(msg)

-        self._finish_frag_download(ctx, info_dict)
-
-        return True
+        return self._finish_frag_download(ctx, info_dict)
yt_dlp/downloader/fragment.py
@@ -14,8 +14,8 @@ from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
 from ..compat import compat_os_name
 from ..utils import (
     DownloadError,
+    RetryManager,
     encodeFilename,
-    error_to_compat_str,
     sanitized_Request,
     traverse_obj,
 )
@@ -65,10 +65,9 @@ class FragmentFD(FileDownloader):
     """

     def report_retry_fragment(self, err, frag_index, count, retries):
-        self.to_screen(
-            '\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
-            % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
-        self.sleep_retry('fragment', count)
+        self.deprecation_warning('yt_dlp.downloader.FragmentFD.report_retry_fragment is deprecated. '
+                                 'Use yt_dlp.downloader.FileDownloader.report_retry instead')
+        return self.report_retry(err, count, retries, frag_index)

     def report_skip_fragment(self, frag_index, err=None):
         err = f' {err};' if err else ''
@@ -296,16 +295,23 @@ class FragmentFD(FileDownloader):
             self.try_remove(ytdl_filename)
         elapsed = time.time() - ctx['started']

-        if ctx['tmpfilename'] == '-':
-            downloaded_bytes = ctx['complete_frags_downloaded_bytes']
+        to_file = ctx['tmpfilename'] != '-'
+        if to_file:
+            downloaded_bytes = os.path.getsize(encodeFilename(ctx['tmpfilename']))
         else:
+            downloaded_bytes = ctx['complete_frags_downloaded_bytes']
+
+        if not downloaded_bytes:
+            if to_file:
+                self.try_remove(ctx['tmpfilename'])
+            self.report_error('The downloaded file is empty')
+            return False
+        elif to_file:
             self.try_rename(ctx['tmpfilename'], ctx['filename'])
-            if self.params.get('updatetime', True):
-                filetime = ctx.get('fragment_filetime')
-                if filetime:
-                    with contextlib.suppress(Exception):
-                        os.utime(ctx['filename'], (time.time(), filetime))
-            downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename']))
+            filetime = ctx.get('fragment_filetime')
+            if self.params.get('updatetime', True) and filetime:
+                with contextlib.suppress(Exception):
+                    os.utime(ctx['filename'], (time.time(), filetime))

         self._hook_progress({
             'downloaded_bytes': downloaded_bytes,
@@ -317,6 +323,7 @@ class FragmentFD(FileDownloader):
             'max_progress': ctx.get('max_progress'),
             'progress_idx': ctx.get('progress_idx'),
         }, info_dict)
+        return True

     def _prepare_external_frag_download(self, ctx):
         if 'live' not in ctx:
@@ -347,6 +354,8 @@ class FragmentFD(FileDownloader):
             return _key_cache[url]

         def decrypt_fragment(fragment, frag_content):
+            if frag_content is None:
+                return
             decrypt_info = fragment.get('decrypt_info')
             if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
                 return frag_content
@@ -361,7 +370,7 @@ class FragmentFD(FileDownloader):

         return decrypt_fragment

-    def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
+    def download_and_append_fragments_multiple(self, *args, **kwargs):
         '''
         @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
         all args must be either tuple or list
@@ -369,7 +378,7 @@ class FragmentFD(FileDownloader):
         interrupt_trigger = [True]
         max_progress = len(args)
         if max_progress == 1:
-            return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
+            return self.download_and_append_fragments(*args[0], **kwargs)
         max_workers = self.params.get('concurrent_fragment_downloads', 1)
         if max_progress > 1:
             self._prepare_multiline_status(max_progress)
@@ -379,8 +388,7 @@ class FragmentFD(FileDownloader):
             ctx['max_progress'] = max_progress
             ctx['progress_idx'] = idx
             return self.download_and_append_fragments(
-                ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func,
-                tpe=tpe, interrupt_trigger=interrupt_trigger)
+                ctx, fragments, info_dict, **kwargs, tpe=tpe, interrupt_trigger=interrupt_trigger)


 class FTPE(concurrent.futures.ThreadPoolExecutor):
     # has to stop this or it's going to wait on the worker thread itself
@@ -427,18 +435,12 @@ class FragmentFD(FileDownloader):
         return result

     def download_and_append_fragments(
-            self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None,
-            tpe=None, interrupt_trigger=None):
-        if not interrupt_trigger:
-            interrupt_trigger = (True, )
+            self, ctx, fragments, info_dict, *, is_fatal=(lambda idx: False),
+            pack_func=(lambda content, idx: content), finish_func=None,
+            tpe=None, interrupt_trigger=(True, )):

-        fragment_retries = self.params.get('fragment_retries', 0)
-        is_fatal = (
-            ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0))
-            if self.params.get('skip_unavailable_fragments', True) else (lambda _: True))
+        if not self.params.get('skip_unavailable_fragments', True):
+            is_fatal = lambda _: True
if not pack_func:
|
|
||||||
pack_func = lambda frag_content, _: frag_content
|
|
||||||
|
|
||||||
def download_fragment(fragment, ctx):
|
def download_fragment(fragment, ctx):
|
||||||
if not interrupt_trigger[0]:
|
if not interrupt_trigger[0]:
|
||||||
@@ -452,32 +454,25 @@ class FragmentFD(FileDownloader):
|
|||||||
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
||||||
|
|
||||||
# Never skip the first fragment
|
# Never skip the first fragment
|
||||||
fatal, count = is_fatal(fragment.get('index') or (frag_index - 1)), 0
|
fatal = is_fatal(fragment.get('index') or (frag_index - 1))
|
||||||
while count <= fragment_retries:
|
|
||||||
|
def error_callback(err, count, retries):
|
||||||
|
if fatal and count > retries:
|
||||||
|
ctx['dest_stream'].close()
|
||||||
|
self.report_retry(err, count, retries, frag_index, fatal)
|
||||||
|
ctx['last_error'] = err
|
||||||
|
|
||||||
|
for retry in RetryManager(self.params.get('fragment_retries'), error_callback):
|
||||||
try:
|
try:
|
||||||
ctx['fragment_count'] = fragment.get('fragment_count')
|
ctx['fragment_count'] = fragment.get('fragment_count')
|
||||||
if self._download_fragment(ctx, fragment['url'], info_dict, headers):
|
if not self._download_fragment(ctx, fragment['url'], info_dict, headers):
|
||||||
break
|
return
|
||||||
return
|
|
||||||
except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
|
except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
|
||||||
# Unavailable (possibly temporary) fragments may be served.
|
retry.error = err
|
||||||
# First we try to retry then either skip or abort.
|
continue
|
||||||
# See https://github.com/ytdl-org/youtube-dl/issues/10165,
|
except DownloadError: # has own retry settings
|
||||||
# https://github.com/ytdl-org/youtube-dl/issues/10448).
|
if fatal:
|
||||||
count += 1
|
raise
|
||||||
ctx['last_error'] = err
|
|
||||||
if count <= fragment_retries:
|
|
||||||
self.report_retry_fragment(err, frag_index, count, fragment_retries)
|
|
||||||
except DownloadError:
|
|
||||||
# Don't retry fragment if error occurred during HTTP downloading
|
|
||||||
# itself since it has own retry settings
|
|
||||||
if not fatal:
|
|
||||||
break
|
|
||||||
raise
|
|
||||||
|
|
||||||
if count > fragment_retries and fatal:
|
|
||||||
ctx['dest_stream'].close()
|
|
||||||
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
|
||||||
|
|
||||||
def append_fragment(frag_content, frag_index, ctx):
|
def append_fragment(frag_content, frag_index, ctx):
|
||||||
if frag_content:
|
if frag_content:
|
||||||
@@ -534,5 +529,4 @@ class FragmentFD(FileDownloader):
|
|||||||
if finish_func is not None:
|
if finish_func is not None:
|
||||||
ctx['dest_stream'].write(finish_func())
|
ctx['dest_stream'].write(finish_func())
|
||||||
ctx['dest_stream'].flush()
|
ctx['dest_stream'].flush()
|
||||||
self._finish_frag_download(ctx, info_dict)
|
return self._finish_frag_download(ctx, info_dict)
|
||||||
return True
|
|
||||||
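The fragment.py hunks above replace each hand-rolled `while count <= fragment_retries` loop with the `RetryManager` helper from `yt_dlp.utils`. A minimal sketch of the consumer pattern the new code relies on — setting `retry.error` schedules another attempt (and fires the callback), while a pass that sets no error ends the loop; the `report` and fake-fetch names here are illustrative, not part of yt-dlp:

```python
from yt_dlp.utils import RetryManager

def report(err, count, retries):
    # Invoked after each failed attempt, including the final fatal one
    # (where count > retries) -- mirrors error_callback in the diff above
    print(f'attempt {count} of {retries} failed: {err}')

attempts = iter(range(3))

for retry in RetryManager(5, report):  # allow up to 5 retries
    try:
        if next(attempts) < 2:  # fail twice, then succeed
            raise OSError('transient failure')
        data = b'fragment data'  # success: the loop simply ends
    except OSError as err:
        retry.error = err  # mark this attempt failed and go around again
        continue
```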
--- a/yt_dlp/downloader/http.py
+++ b/yt_dlp/downloader/http.py
@@ -9,6 +9,7 @@ import urllib.error
 from .common import FileDownloader
 from ..utils import (
     ContentTooShortError,
+    RetryManager,
     ThrottledDownload,
     XAttrMetadataError,
     XAttrUnavailableError,
@@ -72,9 +73,6 @@ class HttpFD(FileDownloader):

         ctx.is_resume = ctx.resume_len > 0

-        count = 0
-        retries = self.params.get('retries', 0)
-
         class SucceedDownload(Exception):
             pass

@@ -349,9 +347,7 @@ class HttpFD(FileDownloader):

             if data_len is not None and byte_counter != data_len:
                 err = ContentTooShortError(byte_counter, int(data_len))
-                if count <= retries:
-                    retry(err)
-                raise err
+                retry(err)

             self.try_rename(ctx.tmpfilename, ctx.filename)

@@ -370,24 +366,20 @@ class HttpFD(FileDownloader):

             return True

-        while count <= retries:
+        for retry in RetryManager(self.params.get('retries'), self.report_retry):
             try:
                 establish_connection()
                 return download()
-            except RetryDownload as e:
-                count += 1
-                if count <= retries:
-                    self.report_retry(e.source_error, count, retries)
-                else:
-                    self.to_screen(f'[download] Got server HTTP error: {e.source_error}')
+            except RetryDownload as err:
+                retry.error = err.source_error
                 continue
             except NextFragment:
+                retry.error = None
+                retry.attempt -= 1
                 continue
             except SucceedDownload:
                 return True
             except:  # noqa: E722
                 close_stream()
                 raise
-
-        self.report_error('giving up after %s retries' % retries)
         return False
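One detail in the http.py hunk deserves a note: a `NextFragment` is expected control flow, not a failure, so the new code clears `retry.error` and hands the attempt back with `retry.attempt -= 1`, leaving the retry budget untouched. A small sketch of that idea, assuming the `RetryManager` semantics shown above (`NextFragment` here is an illustrative stand-in for HttpFD's internal control exception):

```python
from yt_dlp.utils import RetryManager

class NextFragment(Exception):  # illustrative stand-in
    pass

def report(err, count, retries):
    print(f'attempt {count} of {retries} failed: {err}')

moved_on = False
for retry in RetryManager(3, report):
    try:
        if not moved_on:
            moved_on = True
            raise NextFragment  # e.g. a ranged download moving to the next chunk
        print('finished')  # a pass that sets no error ends the loop
    except NextFragment:
        retry.error = None   # not a failure: the error callback must not fire
        retry.attempt -= 1   # give the attempt back; the budget stays intact
        continue
```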
--- a/yt_dlp/downloader/ism.py
+++ b/yt_dlp/downloader/ism.py
@@ -5,6 +5,7 @@ import time
 import urllib.error

 from .fragment import FragmentFD
+from ..utils import RetryManager

 u8 = struct.Struct('>B')
 u88 = struct.Struct('>Bx')
@@ -137,6 +138,8 @@ def write_piff_header(stream, params):

         if fourcc == 'AACL':
             sample_entry_box = box(b'mp4a', sample_entry_payload)
+        if fourcc == 'EC-3':
+            sample_entry_box = box(b'ec-3', sample_entry_payload)
     elif stream_type == 'video':
         sample_entry_payload += u16.pack(0)  # pre defined
         sample_entry_payload += u16.pack(0)  # reserved
@@ -245,7 +248,6 @@ class IsmFD(FragmentFD):
             'ism_track_written': False,
         })

-        fragment_retries = self.params.get('fragment_retries', 0)
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

         frag_index = 0
@@ -253,8 +255,10 @@ class IsmFD(FragmentFD):
             frag_index += 1
             if frag_index <= ctx['fragment_index']:
                 continue
-            count = 0
-            while count <= fragment_retries:
+            retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry,
+                                         frag_index=frag_index, fatal=not skip_unavailable_fragments)
+            for retry in retry_manager:
                 try:
                     success = self._download_fragment(ctx, segment['url'], info_dict)
                     if not success:
@@ -267,18 +271,13 @@ class IsmFD(FragmentFD):
                         write_piff_header(ctx['dest_stream'], info_dict['_download_params'])
                         extra_state['ism_track_written'] = True
                     self._append_fragment(ctx, frag_content)
-                    break
                 except urllib.error.HTTPError as err:
-                    count += 1
-                    if count <= fragment_retries:
-                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
-                if count > fragment_retries:
-                    if skip_unavailable_fragments:
-                        self.report_skip_fragment(frag_index)
-                    continue
-                self.report_error('giving up after %s fragment retries' % fragment_retries)
-                return False
+                    retry.error = err
+                    continue

-        self._finish_frag_download(ctx, info_dict)
-
-        return True
+            if retry_manager.error:
+                if not skip_unavailable_fragments:
+                    return False
+                self.report_skip_fragment(frag_index)
+
+        return self._finish_frag_download(ctx, info_dict)
--- a/yt_dlp/downloader/mhtml.py
+++ b/yt_dlp/downloader/mhtml.py
@@ -4,6 +4,7 @@ import re
 import uuid

 from .fragment import FragmentFD
+from ..compat import imghdr
 from ..utils import escapeHTML, formatSeconds, srt_subtitles_timecode, urljoin
 from ..version import __version__ as YT_DLP_VERSION

@@ -166,21 +167,13 @@ body > figure > img {
                 continue
             frag_content = self._read_fragment(ctx)

-            mime_type = b'image/jpeg'
-            if frag_content.startswith(b'\x89PNG\r\n\x1a\n'):
-                mime_type = b'image/png'
-            if frag_content.startswith((b'GIF87a', b'GIF89a')):
-                mime_type = b'image/gif'
-            if frag_content.startswith(b'RIFF') and frag_content[8:12] == b'WEBP':
-                mime_type = b'image/webp'
-
             frag_header = io.BytesIO()
             frag_header.write(
                 b'--%b\r\n' % frag_boundary.encode('us-ascii'))
             frag_header.write(
                 b'Content-ID: <%b>\r\n' % self._gen_cid(i, fragment, frag_boundary).encode('us-ascii'))
             frag_header.write(
-                b'Content-type: %b\r\n' % mime_type)
+                b'Content-type: %b\r\n' % f'image/{imghdr.what(h=frag_content) or "jpeg"}'.encode())
             frag_header.write(
                 b'Content-length: %u\r\n' % len(frag_content))
             frag_header.write(
@@ -193,5 +186,4 @@ body > figure > img {

         ctx['dest_stream'].write(
             b'--%b--\r\n\r\n' % frag_boundary.encode('us-ascii'))
-        self._finish_frag_download(ctx, info_dict)
-        return True
+        return self._finish_frag_download(ctx, info_dict)
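The mhtml.py hunk above drops four hand-written magic-byte checks in favour of the standard library's `imghdr` sniffer (imported through yt-dlp's compat layer in the diff; `imghdr` was later removed from the stdlib in Python 3.13). A rough equivalence check using plain stdlib `imghdr`:

```python
import imghdr  # stdlib sniffer; removed from the stdlib in Python 3.13

samples = {
    b'\x89PNG\r\n\x1a\n' + b'\x00' * 8: 'png',
    b'GIF89a' + b'\x00' * 8: 'gif',
    b'RIFF\x00\x00\x00\x00WEBP': 'webp',
}
for data, expected in samples.items():
    kind = imghdr.what(None, h=data)  # sniff from the bytes; no file needed
    assert kind == expected, (kind, expected)
    print(f'image/{kind or "jpeg"}')  # same jpeg fallback the diff uses
```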
--- a/yt_dlp/downloader/websocket.py
+++ b/yt_dlp/downloader/websocket.py
@@ -1,3 +1,4 @@
+import asyncio
 import contextlib
 import os
 import signal
@@ -5,7 +6,6 @@ import threading

 from .common import FileDownloader
 from .external import FFmpegFD
-from ..compat import asyncio
 from ..dependencies import websockets

--- a/yt_dlp/downloader/youtube_livechat.py
+++ b/yt_dlp/downloader/youtube_livechat.py
@@ -3,7 +3,13 @@ import time
 import urllib.error

 from .fragment import FragmentFD
-from ..utils import RegexNotFoundError, dict_get, int_or_none, try_get
+from ..utils import (
+    RegexNotFoundError,
+    RetryManager,
+    dict_get,
+    int_or_none,
+    try_get,
+)


 class YoutubeLiveChatFD(FragmentFD):
@@ -16,7 +22,6 @@ class YoutubeLiveChatFD(FragmentFD):
         self.report_warning('Live chat download runs until the livestream ends. '
                             'If you wish to download the video simultaneously, run a separate yt-dlp instance')

-        fragment_retries = self.params.get('fragment_retries', 0)
         test = self.params.get('test', False)

         ctx = {
@@ -104,8 +109,7 @@ class YoutubeLiveChatFD(FragmentFD):
             return continuation_id, live_offset, click_tracking_params

         def download_and_parse_fragment(url, frag_index, request_data=None, headers=None):
-            count = 0
-            while count <= fragment_retries:
+            for retry in RetryManager(self.params.get('fragment_retries'), self.report_retry, frag_index=frag_index):
                 try:
                     success = dl_fragment(url, request_data, headers)
                     if not success:
@@ -120,21 +124,15 @@ class YoutubeLiveChatFD(FragmentFD):
                     live_chat_continuation = try_get(
                         data,
                         lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}
-                    if info_dict['protocol'] == 'youtube_live_chat_replay':
-                        if frag_index == 1:
-                            continuation_id, offset, click_tracking_params = try_refresh_replay_beginning(live_chat_continuation)
-                        else:
-                            continuation_id, offset, click_tracking_params = parse_actions_replay(live_chat_continuation)
-                    elif info_dict['protocol'] == 'youtube_live_chat':
-                        continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
-                    return True, continuation_id, offset, click_tracking_params
+
+                    func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
+                            or frag_index == 1 and try_refresh_replay_beginning
+                            or parse_actions_replay)
+                    return (True, *func(live_chat_continuation))
                 except urllib.error.HTTPError as err:
-                    count += 1
-                    if count <= fragment_retries:
-                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
-                if count > fragment_retries:
-                    self.report_error('giving up after %s fragment retries' % fragment_retries)
-                    return False, None, None, None
+                    retry.error = err
+                    continue
+            return False, None, None, None

         self._prepare_and_start_frag_download(ctx, info_dict)

@@ -193,8 +191,7 @@ class YoutubeLiveChatFD(FragmentFD):
             if test:
                 break

-        self._finish_frag_download(ctx, info_dict)
-        return True
+        return self._finish_frag_download(ctx, info_dict)

     @staticmethod
     def parse_live_timestamp(action):
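The live-chat hunk collapses an if/elif ladder into one `and`/`or` chain that selects a parser function; because functions are always truthy, the first branch whose condition holds wins, exactly like the old ladder. A minimal sketch with placeholder parsers (the real ones are local functions of `YoutubeLiveChatFD.real_download`):

```python
def parse_actions_live(c): return ('live', c)
def parse_actions_replay(c): return ('replay', c)
def try_refresh_replay_beginning(c): return ('replay-start', c)

def pick(protocol, frag_index, continuation):
    # cond1 and f or cond2 and g or h: works because f, g, h are truthy
    func = (protocol == 'youtube_live_chat' and parse_actions_live
            or frag_index == 1 and try_refresh_replay_beginning
            or parse_actions_replay)
    return func(continuation)

print(pick('youtube_live_chat', 5, {}))         # ('live', {})
print(pick('youtube_live_chat_replay', 1, {}))  # ('replay-start', {})
print(pick('youtube_live_chat_replay', 7, {}))  # ('replay', {})
```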
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -1,5 +1,29 @@
 # flake8: noqa: F401

+from .youtube import (  # Youtube is moved to the top to improve performance
+    YoutubeIE,
+    YoutubeClipIE,
+    YoutubeFavouritesIE,
+    YoutubeNotificationsIE,
+    YoutubeHistoryIE,
+    YoutubeTabIE,
+    YoutubeLivestreamEmbedIE,
+    YoutubePlaylistIE,
+    YoutubeRecommendedIE,
+    YoutubeSearchDateIE,
+    YoutubeSearchIE,
+    YoutubeSearchURLIE,
+    YoutubeMusicSearchURLIE,
+    YoutubeSubscriptionsIE,
+    YoutubeStoriesIE,
+    YoutubeTruncatedIDIE,
+    YoutubeTruncatedURLIE,
+    YoutubeYtBeIE,
+    YoutubeYtUserIE,
+    YoutubeWatchLaterIE,
+    YoutubeShortsAudioPivotIE
+)
+
 from .abc import (
     ABCIE,
     ABCIViewIE,
@@ -41,12 +65,20 @@ from .aenetworks import (
     HistoryPlayerIE,
     BiographyIE,
 )
+from .aeonco import AeonCoIE
 from .afreecatv import (
     AfreecaTVIE,
     AfreecaTVLiveIE,
     AfreecaTVUserIE,
 )
+from .agora import (
+    TokFMAuditionIE,
+    TokFMPodcastIE,
+    WyborczaPodcastIE,
+    WyborczaVideoIE,
+)
 from .airmozilla import AirMozillaIE
+from .airtv import AirTVIE
 from .aljazeera import AlJazeeraIE
 from .alphaporno import AlphaPornoIE
 from .amara import AmaraIE
@@ -55,12 +87,20 @@ from .alura import (
     AluraCourseIE
 )
 from .amcnetworks import AMCNetworksIE
-from .amazon import AmazonStoreIE
+from .amazon import (
+    AmazonStoreIE,
+    AmazonReviewsIE,
+)
+from .amazonminitv import (
+    AmazonMiniTVIE,
+    AmazonMiniTVSeasonIE,
+    AmazonMiniTVSeriesIE,
+)
 from .americastestkitchen import (
     AmericasTestKitchenIE,
     AmericasTestKitchenSeasonIE,
 )
-from .animeondemand import AnimeOnDemandIE
+from .angel import AngelIE
 from .anvato import AnvatoIE
 from .aol import AolIE
 from .allocine import AllocineIE
@@ -147,7 +187,12 @@ from .bbc import (
 from .beeg import BeegIE
 from .behindkink import BehindKinkIE
 from .bellmedia import BellMediaIE
+from .beatbump import (
+    BeatBumpVideoIE,
+    BeatBumpPlaylistIE,
+)
 from .beatport import BeatportIE
+from .berufetv import BerufeTVIE
 from .bet import BetIE
 from .bfi import BFIPlayerIE
 from .bfmtv import (
@@ -161,13 +206,16 @@ from .bigo import BigoIE
 from .bild import BildIE
 from .bilibili import (
     BiliBiliIE,
+    BiliBiliBangumiIE,
+    BiliBiliBangumiMediaIE,
     BiliBiliSearchIE,
     BilibiliCategoryIE,
-    BiliBiliBangumiIE,
     BilibiliAudioIE,
     BilibiliAudioAlbumIE,
     BiliBiliPlayerIE,
-    BilibiliChannelIE,
+    BilibiliSpaceVideoIE,
+    BilibiliSpaceAudioIE,
+    BilibiliSpacePlaylistIE,
     BiliIntlIE,
     BiliIntlSeriesIE,
     BiliLiveIE,
@@ -193,6 +241,7 @@ from .bokecc import BokeCCIE
 from .bongacams import BongaCamsIE
 from .bostonglobe import BostonGlobeIE
 from .box import BoxIE
+from .booyah import BooyahClipsIE
 from .bpb import BpbIE
 from .br import (
     BRIE,
@@ -206,6 +255,7 @@ from .brightcove import (
     BrightcoveNewIE,
 )
 from .businessinsider import BusinessInsiderIE
+from .bundesliga import BundesligaIE
 from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
@@ -218,6 +268,8 @@ from .camdemy import (
     CamdemyFolderIE
 )
 from .cammodels import CamModelsIE
+from .camsoda import CamsodaIE
+from .camtasia import CamtasiaEmbedIE
 from .camwithher import CamWithHerIE
 from .canalalpha import CanalAlphaIE
 from .canalplus import CanalplusIE
@@ -280,6 +332,7 @@ from .chirbit import (
 )
 from .cinchcast import CinchcastIE
 from .cinemax import CinemaxIE
+from .cinetecamilano import CinetecaMilanoIE
 from .ciscolive import (
     CiscoLiveSessionIE,
     CiscoLiveSearchIE,
@@ -304,6 +357,7 @@ from .cnn import (
     CNNIE,
     CNNBlogsIE,
     CNNArticleIE,
+    CNNIndonesiaIE,
 )
 from .coub import CoubIE
 from .comedycentral import (
@@ -333,8 +387,6 @@ from .crowdbunker import (
     CrowdBunkerChannelIE,
 )
 from .crunchyroll import (
-    CrunchyrollIE,
-    CrunchyrollShowPlaylistIE,
     CrunchyrollBetaIE,
     CrunchyrollBetaShowIE,
 )
@@ -382,7 +434,7 @@ from .deezer import (
     DeezerAlbumIE,
 )
 from .democracynow import DemocracynowIE
-from .detik import Detik20IE
+from .detik import DetikEmbedIE
 from .dfb import DFBIE
 from .dhm import DHMIE
 from .digg import DiggIE
@@ -408,6 +460,8 @@ from .dplay import (
     DiscoveryLifeIE,
     AnimalPlanetIE,
     TLCIE,
+    MotorTrendIE,
+    MotorTrendOnDemandIE,
     DiscoveryPlusIndiaIE,
     DiscoveryNetworksDeIE,
     DiscoveryPlusItalyIE,
@@ -429,11 +483,14 @@ from .duboku import (
 )
 from .dumpert import DumpertIE
 from .defense import DefenseGouvFrIE
+from .deuxm import (
+    DeuxMIE,
+    DeuxMNewsIE
+)
 from .digitalconcerthall import DigitalConcertHallIE
 from .discovery import DiscoveryIE
 from .disney import DisneyIE
 from .dispeak import DigitallySpeakingIE
-from .doodstream import DoodStreamIE
 from .dropbox import DropboxIE
 from .dropout import (
     DropoutSeasonIE,
@@ -443,7 +500,7 @@ from .dw import (
     DWIE,
     DWArticleIE,
 )
-from .eagleplatform import EaglePlatformIE
+from .eagleplatform import EaglePlatformIE, ClipYouEmbedIE
 from .ebaumsworld import EbaumsWorldIE
 from .echomsk import EchoMskIE
 from .egghead import (
@@ -467,6 +524,7 @@ from .epicon import (
     EpiconIE,
     EpiconSeriesIE,
 )
+from .epoch import EpochIE
 from .eporner import EpornerIE
 from .eroprofile import (
     EroProfileIE,
@@ -486,8 +544,9 @@ from .espn import (
     ESPNCricInfoIE,
 )
 from .esri import EsriVideoIE
-from .europa import EuropaIE
+from .europa import EuropaIE, EuroParlWebstreamIE
 from .europeantour import EuropeanTourIE
+from .eurosport import EurosportIE
 from .euscreen import EUScreenIE
 from .expotv import ExpoTVIE
 from .expressen import ExpressenIE
@@ -497,6 +556,7 @@ from .facebook import (
     FacebookIE,
     FacebookPluginsVideoIE,
     FacebookRedirectURLIE,
+    FacebookReelIE,
 )
 from .fancode import (
     FancodeVodIE,
@@ -542,6 +602,7 @@ from .foxgay import FoxgayIE
 from .foxnews import (
     FoxNewsIE,
     FoxNewsArticleIE,
+    FoxNewsVideoIE,
 )
 from .foxsports import FoxSportsIE
 from .fptplay import FptplayIE
@@ -592,6 +653,10 @@ from .gazeta import GazetaIE
 from .gdcvault import GDCVaultIE
 from .gedidigital import GediDigitalIE
 from .generic import GenericIE
+from .genius import (
+    GeniusIE,
+    GeniusLyricsIE,
+)
 from .gettr import (
     GettrIE,
     GettrStreamingIE,
@@ -619,6 +684,7 @@ from .googlepodcasts import (
 )
 from .googlesearch import GoogleSearchIE
 from .gopro import GoProIE
+from .goplay import GoPlayIE
 from .goshgay import GoshgayIE
 from .gotostage import GoToStageIE
 from .gputechconf import GPUTechConfIE
@@ -628,6 +694,7 @@ from .gronkh import (
     GronkhVodsIE
 )
 from .groupon import GrouponIE
+from .harpodeon import HarpodeonIE
 from .hbo import HBOIE
 from .hearthisat import HearThisAtIE
 from .heise import HeiseIE
@@ -640,11 +707,13 @@ from .hidive import HiDiveIE
 from .historicfilms import HistoricFilmsIE
 from .hitbox import HitboxIE, HitboxLiveIE
 from .hitrecord import HitRecordIE
+from .holodex import HolodexIE
 from .hotnewhiphop import HotNewHipHopIE
 from .hotstar import (
     HotStarIE,
     HotStarPrefixIE,
     HotStarPlaylistIE,
+    HotStarSeasonIE,
     HotStarSeriesIE,
 )
 from .howcast import HowcastIE
@@ -658,6 +727,10 @@ from .hse import (
     HSEShowIE,
     HSEProductIE,
 )
+from .genericembeds import (
+    HTML5MediaEmbedIE,
+    QuotedHTMLIE,
+)
 from .huajiao import HuajiaoIE
 from .huya import HuyaLiveIE
 from .huffpost import HuffPostIE
@@ -682,6 +755,7 @@ from .iheart import (
     IHeartRadioIE,
     IHeartRadioPodcastIE,
 )
+from .iltalehti import IltalehtiIE
 from .imdb import (
     ImdbIE,
     ImdbListIE
@@ -713,6 +787,11 @@ from .iqiyi import (
     IqIE,
     IqAlbumIE
 )
+from .islamchannel import (
+    IslamChannelIE,
+    IslamChannelSeriesIE,
+)
+from .israelnationalnews import IsraelNationalNewsIE
 from .itprotv import (
     ITProTVIE,
     ITProTVCourseIE
@@ -741,12 +820,21 @@ from .jamendo import (
     JamendoIE,
     JamendoAlbumIE,
 )
+from .japandiet import (
+    ShugiinItvLiveIE,
+    ShugiinItvLiveRoomIE,
+    ShugiinItvVodIE,
+    SangiinInstructionIE,
+    SangiinIE,
+)
 from .jeuxvideo import JeuxVideoIE
 from .jove import JoveIE
 from .joj import JojIE
 from .jwplatform import JWPlatformIE
 from .kakao import KakaoIE
 from .kaltura import KalturaIE
+from .kanal2 import Kanal2IE
+from .kankanews import KankaNewsIE
 from .karaoketv import KaraoketvIE
 from .karrierevideos import KarriereVideosIE
 from .keezmovies import KeezMoviesIE
@@ -756,10 +844,15 @@ from .khanacademy import (
     KhanAcademyIE,
     KhanAcademyUnitIE,
 )
+from .kick import (
+    KickIE,
+    KickVODIE,
+)
 from .kicker import KickerIE
 from .kickstarter import KickStarterIE
 from .kinja import KinjaEmbedIE
 from .kinopoisk import KinoPoiskIE
+from .kompas import KompasVideoIE
 from .konserthusetplay import KonserthusetPlayIE
 from .koo import KooIE
 from .kth import KTHIE
@@ -839,6 +932,7 @@ from .linkedin import (
 )
 from .linuxacademy import LinuxAcademyIE
 from .liputan6 import Liputan6IE
+from .listennotes import ListenNotesIE
 from .litv import LiTVIE
 from .livejournal import LiveJournalIE
 from .livestream import (
@@ -901,6 +995,11 @@ from .mediasite import (
     MediasiteCatalogIE,
     MediasiteNamedCatalogIE,
 )
+from .mediastream import (
+    MediaStreamIE,
+    WinSportsVideoIE,
+)
+from .mediaworksnz import MediaWorksNZVODIE
 from .medici import MediciIE
 from .megaphone import MegaphoneIE
 from .meipai import MeipaiIE
@@ -916,6 +1015,7 @@ from .microsoftvirtualacademy import (
     MicrosoftVirtualAcademyIE,
     MicrosoftVirtualAcademyCourseIE,
 )
+from .microsoftembed import MicrosoftEmbedIE
 from .mildom import (
     MildomIE,
     MildomVodIE,
@@ -949,6 +1049,8 @@ from .mixcloud import (
 from .mlb import (
     MLBIE,
     MLBVideoIE,
+    MLBTVIE,
+    MLBArticleIE,
 )
 from .mlssoccer import MLSSoccerIE
 from .mnet import MnetIE
@@ -967,6 +1069,7 @@ from .motherless import (
 from .motorsport import MotorsportIE
 from .movieclips import MovieClipsIE
 from .moviepilot import MoviepilotIE
+from .moview import MoviewPlayIE
 from .moviezine import MoviezineIE
 from .movingimage import MovingImageIE
 from .msn import MSNIE
@@ -1035,6 +1138,7 @@ from .nbc import (
     NBCSportsIE,
     NBCSportsStreamIE,
     NBCSportsVPlayerIE,
+    NBCStationsIE,
 )
 from .ndr import (
     NDRIE,
@@ -1063,12 +1167,14 @@ from .neteasemusic import (
 from .netverse import (
     NetverseIE,
     NetversePlaylistIE,
+    NetverseSearchIE,
 )
 from .newgrounds import (
     NewgroundsIE,
     NewgroundsPlaylistIE,
     NewgroundsUserIE,
 )
+from .newspicks import NewsPicksIE
 from .newstube import NewstubeIE
 from .newsy import NewsyIE
 from .nextmedia import (
@@ -1123,11 +1229,13 @@ from .nintendo import NintendoIE
 from .nitter import NitterIE
 from .njpwworld import NJPWWorldIE
 from .nobelprize import NobelPrizeIE
+from .noice import NoicePodcastIE
 from .nonktube import NonkTubeIE
 from .noodlemagazine import NoodleMagazineIE
 from .noovo import NoovoIE
 from .normalboots import NormalbootsIE
 from .nosvideo import NosVideoIE
+from .nosnl import NOSNLArticleIE
 from .nova import (
     NovaEmbedIE,
     NovaIE,
@@ -1177,11 +1285,17 @@ from .nzherald import NZHeraldIE
 from .nzz import NZZIE
 from .odatv import OdaTVIE
 from .odnoklassniki import OdnoklassnikiIE
+from .oftv import (
+    OfTVIE,
+    OfTVPlaylistIE
+)
 from .oktoberfesttv import OktoberfestTVIE
 from .olympics import OlympicsReplayIE
 from .on24 import On24IE
 from .ondemandkorea import OnDemandKoreaIE
 from .onefootball import OneFootballIE
+from .onenewsnz import OneNewsNZIE
+from .oneplace import OnePlacePodcastIE
 from .onet import (
     OnetIE,
     OnetChannelIE,
@@ -1205,19 +1319,8 @@ from .openrec import (
 from .ora import OraTVIE
 from .orf import (
     ORFTVthekIE,
-    ORFFM4IE,
     ORFFM4StoryIE,
-    ORFOE1IE,
-    ORFOE3IE,
-    ORFNOEIE,
-    ORFWIEIE,
-    ORFBGLIE,
-    ORFOOEIE,
-    ORFSTMIE,
-    ORFKTNIE,
-    ORFSBGIE,
-    ORFTIRIE,
-    ORFVBGIE,
+    ORFRadioIE,
     ORFIPTVIE,
 )
 from .outsidetv import OutsideTVIE
@@ -1240,11 +1343,11 @@ from .paramountplus import (
     ParamountPlusIE,
     ParamountPlusSeriesIE,
 )
-from .parliamentliveuk import ParliamentLiveUKIE
+from .parler import ParlerIE
 from .parlview import ParlviewIE
 from .patreon import (
     PatreonIE,
-    PatreonUserIE
+    PatreonCampaignIE
 )
 from .pbs import PBSIE
 from .pearvideo import PearVideoIE
@@ -1301,6 +1404,7 @@ from .pluralsight import (
     PluralsightIE,
     PluralsightCourseIE,
 )
+from .podbayfm import PodbayFMIE, PodbayFMChannelIE
 from .podchaser import PodchaserIE
 from .podomatic import PodomaticIE
 from .pokemon import (
@@ -1314,6 +1418,8 @@ from .pokergo import (
 from .polsatgo import PolsatGoIE
 from .polskieradio import (
     PolskieRadioIE,
+    PolskieRadioLegacyIE,
+    PolskieRadioAuditionIE,
     PolskieRadioCategoryIE,
     PolskieRadioPlayerIE,
     PolskieRadioPodcastIE,
@@ -1341,6 +1447,7 @@ from .puhutv import (
     PuhuTVIE,
     PuhuTVSerieIE,
 )
+from .prankcast import PrankCastIE
 from .premiershiprugby import PremiershipRugbyIE
 from .presstv import PressTVIE
 from .projectveritas import ProjectVeritasIE
@@ -1354,6 +1461,7 @@ from .prx import (
 )
 from .puls4 import Puls4IE
 from .pyvideo import PyvideoIE
+from .qingting import QingTingIE
 from .qqmusic import (
     QQMusicIE,
     QQMusicSingerIE,
@@ -1391,6 +1499,8 @@ from .rai import (
     RaiPlaySoundIE,
     RaiPlaySoundLiveIE,
     RaiPlaySoundPlaylistIE,
+    RaiNewsIE,
+    RaiSudtirolIE,
     RaiIE,
 )
 from .raywenderlich import (
@@ -1409,6 +1519,7 @@ from .rcti import (
     RCTIPlusTVIE,
 )
 from .rds import RDSIE
+from .redbee import ParliamentLiveUKIE, RTBFIE
 from .redbulltv import (
     RedBullTVIE,
     RedBullEmbedIE,
@@ -1442,7 +1553,6 @@ from .rokfin import (
 from .roosterteeth import RoosterTeethIE, RoosterTeethSeriesIE
 from .rottentomatoes import RottenTomatoesIE
 from .rozhlas import RozhlasIE
-from .rtbf import RTBFIE
 from .rte import RteIE, RteRadioIE
 from .rtlnl import (
     RtlNlIE,
@@ -1479,6 +1589,7 @@ from .ruhd import RUHDIE
 from .rule34video import Rule34VideoIE
 from .rumble import (
     RumbleEmbedIE,
+    RumbleIE,
     RumbleChannelIE,
 )
 from .rutube import (
@@ -1519,7 +1630,9 @@ from .samplefocus import SampleFocusIE
 from .sapo import SapoIE
 from .savefrom import SaveFromIE
 from .sbs import SBSIE
+from .screen9 import Screen9IE
 from .screencast import ScreencastIE
+from .screencastify import ScreencastifyIE
 from .screencastomatic import ScreencastOMaticIE
 from .scrippsnetworks import (
     ScrippsNetworksWatchIE,
@@ -1548,6 +1661,8 @@ from .shared import (
     SharedIE,
     VivoIE,
 )
+from .sharevideos import ShareVideosEmbedIE
+from .sibnet import SibnetEmbedIE
 from .shemaroome import ShemarooMeIE
 from .showroomlive import ShowRoomLiveIE
 from .simplecast import (
@@ -1563,7 +1678,6 @@ from .skyit import (
     SkyItVideoIE,
     SkyItVideoLiveIE,
     SkyItIE,
-    SkyItAcademyIE,
     SkyItArteIE,
     CieloTVItIE,
     TV8ItIE,
@@ -1583,6 +1697,7 @@ from .sky import (
 from .slideshare import SlideshareIE
 from .slideslive import SlidesLiveIE
 from .slutload import SlutloadIE
+from .smotrim import SmotrimIE
 from .snotr import SnotrIE
 from .sohu import SohuIE
 from .sonyliv import (
@@ -1595,6 +1710,7 @@ from .soundcloud import (
     SoundcloudSetIE,
     SoundcloudRelatedIE,
     SoundcloudUserIE,
+    SoundcloudUserPermalinkIE,
     SoundcloudTrackStationIE,
     SoundcloudPlaylistIE,
     SoundcloudSearchIE,
@@ -1682,6 +1798,7 @@ from .svt import (
     SVTPlayIE,
     SVTSeriesIE,
 )
+from .swearnet import SwearnetEpisodeIE
 from .swrmediathek import SWRMediathekIE
 from .syvdk import SYVDKIE
 from .syfy import SyfyIE
@@ -1725,6 +1842,15 @@ from .telequebec import (
 )
 from .teletask import TeleTaskIE
 from .telewebion import TelewebionIE
+from .tempo import TempoIE
+from .tencent import (
+    IflixEpisodeIE,
+    IflixSeriesIE,
+    VQQSeriesIE,
+    VQQVideoIE,
+    WeTvEpisodeIE,
+    WeTvSeriesIE,
+)
 from .tennistv import TennisTVIE
 from .tenplay import TenPlayIE
 from .testurl import TestURLIE
@@ -1746,6 +1872,11 @@ from .theweatherchannel import TheWeatherChannelIE
 from .thisamericanlife import ThisAmericanLifeIE
 from .thisav import ThisAVIE
 from .thisoldhouse import ThisOldHouseIE
+from .thisvid import (
+    ThisVidIE,
+    ThisVidMemberIE,
+    ThisVidPlaylistIE,
+)
 from .threespeak import (
     ThreeSpeakIE,
     ThreeSpeakUserIE,
@@ -1784,6 +1915,10 @@ from .toongoggles import ToonGogglesIE
 from .toutv import TouTvIE
 from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
+from .triller import (
+    TrillerIE,
+    TrillerUserIE,
+)
 from .trilulilu import TriluliluIE
 from .trovo import (
     TrovoIE,
@@ -1791,8 +1926,10 @@ from .trovo import (
     TrovoChannelVodIE,
     TrovoChannelClipIE,
 )
+from .trtcocuk import TrtCocukVideoIE
 from .trueid import TrueIDIE
 from .trunews import TruNewsIE
+from .truth import TruthIE
 from .trutv import TruTVIE
 from .tube8 import Tube8IE
 from .tubetugraz import TubeTuGrazIE, TubeTuGrazSeriesIE
@@ -1816,6 +1953,9 @@ from .tv2 import (
     KatsomoIE,
     MTVUutisetArticleIE,
 )
+from .tv24ua import (
+    TV24UAVideoIE,
+)
 from .tv2dk import (
     TV2DKIE,
     TV2DKBornholmPlayIE,
@@ -1865,7 +2005,8 @@ from .tvp import (
     TVPEmbedIE,
     TVPIE,
     TVPStreamIE,
-    TVPWebsiteIE,
+    TVPVODSeriesIE,
+    TVPVODVideoIE,
 )
 from .tvplay import (
     TVPlayIE,
@@ -1896,6 +2037,7 @@ from .twitter import (
     TwitterIE,
     TwitterAmplifyIE,
     TwitterBroadcastIE,
+    TwitterSpacesIE,
     TwitterShortenerIE,
 )
 from .udemy import (
@@ -1918,6 +2060,8 @@ from .drooble import DroobleIE
 from .umg import UMGDeIE
 from .unistra import UnistraIE
 from .unity import UnityIE
+from .unscripted import UnscriptedNewsVideoIE
+from .unsupported import KnownDRMIE, KnownPiracyIE
 from .uol import UOLIE
 from .uplynk import (
     UplynkIE,
@@ -1937,7 +2081,10 @@ from .varzesh3 import Varzesh3IE
 from .vbox7 import Vbox7IE
 from .veehd import VeeHDIE
 from .veo import VeoIE
-from .veoh import VeohIE
+from .veoh import (
+    VeohIE,
+    VeohUserIE
+)
 from .vesti import VestiIE
 from .vevo import (
     VevoIE,
@@ -1963,6 +2110,13 @@ from .videocampus_sachsen import (
 )
 from .videodetective import VideoDetectiveIE
 from .videofyme import VideofyMeIE
+from .videoken import (
+    VideoKenIE,
+    VideoKenPlayerIE,
+    VideoKenPlaylistIE,
+    VideoKenCategoryIE,
+    VideoKenTopicIE,
+)
 from .videomore import (
     VideomoreIE,
     VideomoreVideoIE,
@@ -1975,7 +2129,6 @@ from .vidio import (
     VidioLiveIE
 )
 from .vidlii import VidLiiIE
-from .vier import VierIE, VierVideosIE
 from .viewlift import (
     ViewLiftIE,
     ViewLiftEmbedIE,
@@ -1988,6 +2141,7 @@ from .vimeo import (
     VimeoGroupsIE,
     VimeoLikesIE,
     VimeoOndemandIE,
+    VimeoProIE,
     VimeoReviewIE,
     VimeoUserIE,
     VimeoWatchLaterIE,
@@ -2075,6 +2229,7 @@ from .wdr import (
     WDRElefantIE,
     WDRMobileIE,
 )
+from .webcamerapl import WebcameraplIE
 from .webcaster import (
     WebcasterIE,
     WebcasterFeedIE,
@@ -2088,7 +2243,6 @@ from .weibo import (
     WeiboMobileIE
 )
 from .weiqitv import WeiqiTVIE
-from .wetv import WeTvEpisodeIE, WeTvSeriesIE
 from .wikimedia import WikimediaIE
 from .willow import WillowIE
 from .wimtv import WimTVIE
@@ -2096,6 +2250,11 @@ from .whowatch import WhoWatchIE
 from .wistia import (
     WistiaIE,
     WistiaPlaylistIE,
+    WistiaChannelIE,
+)
+from .wordpress import (
+    WordpressPlaylistEmbedIE,
+    WordpressMiniAudioPlayerEmbedIE,
 )
 from .worldstarhiphop import WorldStarHipHopIE
 from .wppilot import (
@@ -2115,12 +2274,6 @@ from .xhamster import (
     XHamsterEmbedIE,
     XHamsterUserIE,
 )
-from .xiami import (
-    XiamiSongIE,
-    XiamiAlbumIE,
-    XiamiArtistIE,
-    XiamiCollectionIE
-)
 from .ximalaya import (
     XimalayaIE,
     XimalayaAlbumIE
@@ -2157,6 +2310,7 @@ from .yandexvideo import (
 from .yapfiles import YapFilesIE
 from .yesjapan import YesJapanIE
 from .yinyuetai import YinYueTaiIE
+from .yle_areena import YleAreenaIE
 from .ynet import YnetIE
 from .youjizz import YouJizzIE
 from .youku import (
@@ -2171,42 +2325,44 @@ from .younow import (
 from .youporn import YouPornIE
 from .yourporn import YourPornIE
 from .yourupload import YourUploadIE
-from .youtube import (
-    YoutubeIE,
-    YoutubeClipIE,
-    YoutubeFavouritesIE,
-    YoutubeNotificationsIE,
-    YoutubeHistoryIE,
-    YoutubeTabIE,
-    YoutubeLivestreamEmbedIE,
-    YoutubePlaylistIE,
-    YoutubeRecommendedIE,
-    YoutubeSearchDateIE,
-    YoutubeSearchIE,
-    YoutubeSearchURLIE,
-    YoutubeMusicSearchURLIE,
-    YoutubeSubscriptionsIE,
-    YoutubeStoriesIE,
-    YoutubeTruncatedIDIE,
-    YoutubeTruncatedURLIE,
-    YoutubeYtBeIE,
-    YoutubeYtUserIE,
-    YoutubeWatchLaterIE,
-)
 from .zapiks import ZapiksIE
 from .zattoo import (
     BBVTVIE,
+    BBVTVLiveIE,
+    BBVTVRecordingsIE,
     EinsUndEinsTVIE,
+    EinsUndEinsTVLiveIE,
+    EinsUndEinsTVRecordingsIE,
     EWETVIE,
+    EWETVLiveIE,
+    EWETVRecordingsIE,
     GlattvisionTVIE,
+    GlattvisionTVLiveIE,
+    GlattvisionTVRecordingsIE,
     MNetTVIE,
-    NetPlusIE,
+    MNetTVLiveIE,
+    MNetTVRecordingsIE,
+    NetPlusTVIE,
+    NetPlusTVLiveIE,
+    NetPlusTVRecordingsIE,
     OsnatelTVIE,
+    OsnatelTVLiveIE,
+    OsnatelTVRecordingsIE,
     QuantumTVIE,
+    QuantumTVLiveIE,
+    QuantumTVRecordingsIE,
     SaltTVIE,
+    SaltTVLiveIE,
+    SaltTVRecordingsIE,
     SAKTVIE,
+    SAKTVLiveIE,
+    SAKTVRecordingsIE,
     VTXTVIE,
+    VTXTVLiveIE,
+    VTXTVRecordingsIE,
     WalyTVIE,
+    WalyTVLiveIE,
+    WalyTVRecordingsIE,
     ZattooIE,
     ZattooLiveIE,
     ZattooMoviesIE,
@@ -2217,6 +2373,7 @@ from .zee5 import (
     Zee5IE,
     Zee5SeriesIE,
 )
+from .zeenews import ZeeNewsIE
 from .zhihu import ZhihuIE
 from .zingmp3 import (
     ZingMp3IE,
|
|||||||
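The import shuffling above is easier to read with the file's role in mind: _extractors.py is a flat, ordered registry, and every class imported there becomes a candidate that input URLs are matched against. A minimal sketch of how that registry is consumed (gen_extractor_classes() and suitable() are real yt_dlp APIs; the first-match comment is my reading of the ordering):

from yt_dlp.extractor import gen_extractor_classes

def find_extractor(url):
    # extractors are tried in registry order; generic fallbacks sit last,
    # so the first suitable() hit is the most specific match
    return next(ie for ie in gen_extractor_classes() if ie.suitable(url))

print(find_extractor('https://vimeo.com/76979871').IE_NAME)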
yt_dlp/extractor/abc.py
@@ -155,8 +155,6 @@ class ABCIE(InfoExtractor):
                 'format_id': format_id
             })
 
-        self._sort_formats(formats)
-
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
@@ -221,7 +219,6 @@ class ABCIViewIE(InfoExtractor):
                 entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
             if formats:
                 break
-        self._sort_formats(formats)
 
         subtitles = {}
         src_vtt = stream.get('captions', {}).get('src-vtt')
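A pattern that repeats through most of the remaining files starts in this hunk: every self._sort_formats(formats) call is deleted. This follows yt-dlp's deprecation of in-extractor sorting; the core now orders whatever formats list an extractor returns, honoring user preferences such as --format-sort. A toy sketch of the core-side idea (not yt-dlp's actual sorting code, whose criteria are configurable and much richer):

formats = [
    {'format_id': 'hls-360', 'height': 360, 'tbr': 800},
    {'format_id': 'hls-720', 'height': 720, 'tbr': 2500},
    {'format_id': 'http-480', 'height': 480, 'tbr': 1200},
]
# roughly: prefer higher resolution, then higher bitrate
formats.sort(key=lambda f: (f.get('height') or 0, f.get('tbr') or 0))
print(formats[-1]['format_id'])  # hls-720 is the preferred candidate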
yt_dlp/extractor/abcotvs.py
@@ -78,7 +78,6 @@ class ABCOTVSIE(InfoExtractor):
                 'url': mp4_url,
                 'width': 640,
             })
-        self._sort_formats(formats)
 
         image = video.get('image') or {}
 
@@ -119,7 +118,6 @@ class ABCOTVSClipsIE(InfoExtractor):
         title = video_data['title']
         formats = self._extract_m3u8_formats(
             video_data['videoURL'].split('?')[0], video_id, 'mp4')
-        self._sort_formats(formats)
 
         return {
             'id': video_id,
yt_dlp/extractor/abematv.py
@@ -1,5 +1,6 @@
 import base64
 import binascii
+import functools
 import hashlib
 import hmac
 import io
@@ -20,11 +21,11 @@ from ..utils import (
     decode_base_n,
     int_or_none,
     intlist_to_bytes,
+    OnDemandPagedList,
     request_to_url,
     time_seconds,
     traverse_obj,
     update_url_query,
-    urljoin,
 )
 
 # NOTE: network handler related code is temporary thing until network stack overhaul PRs are merged (#2861/#2862)
@@ -145,17 +146,106 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
 
 
 class AbemaTVBaseIE(InfoExtractor):
+    _USERTOKEN = None
+    _DEVICE_ID = None
+    _MEDIATOKEN = None
+
+    _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'
+
+    @classmethod
+    def _generate_aks(cls, deviceid):
+        deviceid = deviceid.encode('utf-8')
+        # add 1 hour and then drop minute and secs
+        ts_1hour = int((time_seconds(hours=9) // 3600 + 1) * 3600)
+        time_struct = time.gmtime(ts_1hour)
+        ts_1hour_str = str(ts_1hour).encode('utf-8')
+
+        tmp = None
+
+        def mix_once(nonce):
+            nonlocal tmp
+            h = hmac.new(cls._SECRETKEY, digestmod=hashlib.sha256)
+            h.update(nonce)
+            tmp = h.digest()
+
+        def mix_tmp(count):
+            nonlocal tmp
+            for i in range(count):
+                mix_once(tmp)
+
+        def mix_twist(nonce):
+            nonlocal tmp
+            mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)
+
+        mix_once(cls._SECRETKEY)
+        mix_tmp(time_struct.tm_mon)
+        mix_twist(deviceid)
+        mix_tmp(time_struct.tm_mday % 5)
+        mix_twist(ts_1hour_str)
+        mix_tmp(time_struct.tm_hour % 5)
+
+        return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')
+
+    def _get_device_token(self):
+        if self._USERTOKEN:
+            return self._USERTOKEN
+
+        AbemaTVBaseIE._DEVICE_ID = str(uuid.uuid4())
+        aks = self._generate_aks(self._DEVICE_ID)
+        user_data = self._download_json(
+            'https://api.abema.io/v1/users', None, note='Authorizing',
+            data=json.dumps({
+                'deviceId': self._DEVICE_ID,
+                'applicationKeySecret': aks,
+            }).encode('utf-8'),
+            headers={
+                'Content-Type': 'application/json',
+            })
+        AbemaTVBaseIE._USERTOKEN = user_data['token']
+
+        # don't allow adding it 2 times or more, though it's guarded
+        remove_opener(self._downloader, AbemaLicenseHandler)
+        add_opener(self._downloader, AbemaLicenseHandler(self))
+
+        return self._USERTOKEN
+
+    def _get_media_token(self, invalidate=False, to_show=True):
+        if not invalidate and self._MEDIATOKEN:
+            return self._MEDIATOKEN
+
+        AbemaTVBaseIE._MEDIATOKEN = self._download_json(
+            'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
+            query={
+                'osName': 'android',
+                'osVersion': '6.0.1',
+                'osLang': 'ja_JP',
+                'osTimezone': 'Asia/Tokyo',
+                'appId': 'tv.abema',
+                'appVersion': '3.27.1'
+            }, headers={
+                'Authorization': f'bearer {self._get_device_token()}',
+            })['token']
+
+        return self._MEDIATOKEN
+
+    def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'):
+        return self._download_json(
+            f'https://api.abema.io/{endpoint}', video_id, query=query or {},
+            note=note,
+            headers={
+                'Authorization': f'bearer {self._get_device_token()}',
+            })
+
     def _extract_breadcrumb_list(self, webpage, video_id):
         for jld in re.finditer(
                 r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
                 webpage):
             jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
-            if jsonld:
-                if jsonld.get('@type') != 'BreadcrumbList':
-                    continue
-                trav = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
-                if trav:
-                    return trav
+            if traverse_obj(jsonld, '@type') != 'BreadcrumbList':
+                continue
+            items = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
+            if items:
+                return items
         return []
 
 
@@ -207,87 +297,7 @@ class AbemaTVIE(AbemaTVBaseIE):
         },
         'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server',
     }]
-    _USERTOKEN = None
-    _DEVICE_ID = None
     _TIMETABLE = None
-    _MEDIATOKEN = None
-
-    _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'
-
-    def _generate_aks(self, deviceid):
-        deviceid = deviceid.encode('utf-8')
-        # add 1 hour and then drop minute and secs
-        ts_1hour = int((time_seconds(hours=9) // 3600 + 1) * 3600)
-        time_struct = time.gmtime(ts_1hour)
-        ts_1hour_str = str(ts_1hour).encode('utf-8')
-
-        tmp = None
-
-        def mix_once(nonce):
-            nonlocal tmp
-            h = hmac.new(self._SECRETKEY, digestmod=hashlib.sha256)
-            h.update(nonce)
-            tmp = h.digest()
-
-        def mix_tmp(count):
-            nonlocal tmp
-            for i in range(count):
-                mix_once(tmp)
-
-        def mix_twist(nonce):
-            nonlocal tmp
-            mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)
-
-        mix_once(self._SECRETKEY)
-        mix_tmp(time_struct.tm_mon)
-        mix_twist(deviceid)
-        mix_tmp(time_struct.tm_mday % 5)
-        mix_twist(ts_1hour_str)
-        mix_tmp(time_struct.tm_hour % 5)
-
-        return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')
-
-    def _get_device_token(self):
-        if self._USERTOKEN:
-            return self._USERTOKEN
-
-        self._DEVICE_ID = str(uuid.uuid4())
-        aks = self._generate_aks(self._DEVICE_ID)
-        user_data = self._download_json(
-            'https://api.abema.io/v1/users', None, note='Authorizing',
-            data=json.dumps({
-                'deviceId': self._DEVICE_ID,
-                'applicationKeySecret': aks,
-            }).encode('utf-8'),
-            headers={
-                'Content-Type': 'application/json',
-            })
-        self._USERTOKEN = user_data['token']
-
-        # don't allow adding it 2 times or more, though it's guarded
-        remove_opener(self._downloader, AbemaLicenseHandler)
-        add_opener(self._downloader, AbemaLicenseHandler(self))
-
-        return self._USERTOKEN
-
-    def _get_media_token(self, invalidate=False, to_show=True):
-        if not invalidate and self._MEDIATOKEN:
-            return self._MEDIATOKEN
-
-        self._MEDIATOKEN = self._download_json(
-            'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
-            query={
-                'osName': 'android',
-                'osVersion': '6.0.1',
-                'osLang': 'ja_JP',
-                'osTimezone': 'Asia/Tokyo',
-                'appId': 'tv.abema',
-                'appVersion': '3.27.1'
-            }, headers={
-                'Authorization': 'bearer ' + self._get_device_token()
-            })['token']
-
-        return self._MEDIATOKEN
 
     def _perform_login(self, username, password):
         if '@' in username:  # don't strictly check if it's email address or not
@@ -301,13 +311,13 @@ class AbemaTVIE(AbemaTVBaseIE):
             method: username,
             'password': password
         }).encode('utf-8'), headers={
-            'Authorization': 'bearer ' + self._get_device_token(),
+            'Authorization': f'bearer {self._get_device_token()}',
             'Origin': 'https://abema.tv',
             'Referer': 'https://abema.tv/',
             'Content-Type': 'application/json',
         })
 
-        self._USERTOKEN = login_response['token']
+        AbemaTVBaseIE._USERTOKEN = login_response['token']
         self._get_media_token(True)
 
     def _real_extract(self, url):
@@ -355,7 +365,7 @@ class AbemaTVIE(AbemaTVBaseIE):
         # read breadcrumb on top of page
         breadcrumb = self._extract_breadcrumb_list(webpage, video_id)
         if breadcrumb:
-            # breadcrumb list translates to: (example is 1st test for this IE)
+            # breadcrumb list translates to: (e.g. 1st test for this IE)
             # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title)
             # hence this works
             info['series'] = breadcrumb[-2]
@@ -442,6 +452,7 @@ class AbemaTVIE(AbemaTVBaseIE):
 
 class AbemaTVTitleIE(AbemaTVBaseIE):
     _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'
+    _PAGE_SIZE = 25
 
     _TESTS = [{
         'url': 'https://abema.tv/video/title/90-1597',
@@ -457,18 +468,39 @@ class AbemaTVTitleIE(AbemaTVBaseIE):
             'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
         },
         'playlist_mincount': 16,
+    }, {
+        'url': 'https://abema.tv/video/title/25-102',
+        'info_dict': {
+            'id': '25-102',
+            'title': 'ソードアート・オンライン アリシゼーション',
+        },
+        'playlist_mincount': 24,
     }]
 
+    def _fetch_page(self, playlist_id, series_version, page):
+        programs = self._call_api(
+            f'v1/video/series/{playlist_id}/programs', playlist_id,
+            note=f'Downloading page {page + 1}',
+            query={
+                'seriesVersion': series_version,
+                'offset': str(page * self._PAGE_SIZE),
+                'order': 'seq',
+                'limit': str(self._PAGE_SIZE),
+            })
+        yield from (
+            self.url_result(f'https://abema.tv/video/episode/{x}')
+            for x in traverse_obj(programs, ('programs', ..., 'id'), default=[]))
+
+    def _entries(self, playlist_id, series_version):
+        return OnDemandPagedList(
+            functools.partial(self._fetch_page, playlist_id, series_version),
+            self._PAGE_SIZE)
+
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-
-        playlist_title, breadcrumb = None, self._extract_breadcrumb_list(webpage, video_id)
-        if breadcrumb:
-            playlist_title = breadcrumb[-1]
-
-        playlist = [
-            self.url_result(urljoin('https://abema.tv/', mobj.group(1)))
-            for mobj in re.finditer(r'<li\s*class=".+?EpisodeList.+?"><a\s*href="(/[^"]+?)"', webpage)]
-
-        return self.playlist_result(playlist, playlist_title=playlist_title, playlist_id=video_id)
+        playlist_id = self._match_id(url)
+        series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id)
+
+        return self.playlist_result(
+            self._entries(playlist_id, series_info['version']), playlist_id=playlist_id,
+            playlist_title=series_info.get('title'),
+            playlist_description=series_info.get('content'))
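The block moved into AbemaTVBaseIE above centres on _generate_aks, which derives the applicationKeySecret sent when registering a device: an HMAC-SHA256 keyed with a static secret is repeatedly folded over its own digest, the device id, and the current hour bucket (the real code buckets time in JST via time_seconds(hours=9)). A standalone sketch of the same mixing scheme, with a placeholder secret rather than Abema's real key:

import base64
import hashlib
import hmac
import time

SECRET = b'placeholder-secret'  # stand-in; not the real _SECRETKEY

def generate_aks(device_id, now=None):
    # round the timestamp up to the next full hour
    ts_1hour = int(((now or time.time()) // 3600 + 1) * 3600)
    time_struct = time.gmtime(ts_1hour)

    tmp = b''

    def mix_once(nonce):
        nonlocal tmp
        tmp = hmac.new(SECRET, nonce, hashlib.sha256).digest()

    def mix_tmp(count):
        for _ in range(count):
            mix_once(tmp)  # fold the digest over itself

    def mix_twist(nonce):
        mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)

    mix_once(SECRET)
    mix_tmp(time_struct.tm_mon)
    mix_twist(device_id.encode())
    mix_tmp(time_struct.tm_mday % 5)
    mix_twist(str(ts_1hour).encode())
    mix_tmp(time_struct.tm_hour % 5)
    return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode()

print(generate_aks('00000000-0000-0000-0000-000000000000'))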
yt_dlp/extractor/acfun.py
@@ -27,7 +27,6 @@ class AcFunVideoBaseIE(InfoExtractor):
                 **parse_codecs(video.get('codecs', ''))
             })
 
-        self._sort_formats(formats)
         return {
             'id': video_id,
             'formats': formats,
@@ -84,7 +83,7 @@ class AcFunVideoIE(AcFunVideoBaseIE):
         video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        json_all = self._search_json(r'window.videoInfo\s*=\s*', webpage, 'videoInfo', video_id)
+        json_all = self._search_json(r'window.videoInfo\s*=', webpage, 'videoInfo', video_id)
 
         title = json_all.get('title')
         video_list = json_all.get('videoList') or []
@@ -161,10 +160,10 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         ac_idx = parse_qs(url).get('ac', [None])[-1]
-        video_id = f'{video_id}{format_field(ac_idx, template="__%s")}'
+        video_id = f'{video_id}{format_field(ac_idx, None, "__%s")}'
 
         webpage = self._download_webpage(url, video_id)
-        json_bangumi_data = self._search_json(r'window.bangumiData\s*=\s*', webpage, 'bangumiData', video_id)
+        json_bangumi_data = self._search_json(r'window.bangumiData\s*=', webpage, 'bangumiData', video_id)
 
         if ac_idx:
             video_info = json_bangumi_data['hlVideoInfo']
@@ -181,7 +180,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
                 if v.get('id') == season_id), 1)
 
         json_bangumi_list = self._search_json(
-            r'window\.bangumiList\s*=\s*', webpage, 'bangumiList', video_id, fatal=False)
+            r'window\.bangumiList\s*=', webpage, 'bangumiList', video_id, fatal=False)
         video_internal_id = int_or_none(traverse_obj(json_bangumi_data, ('currentVideoInfo', 'id')))
         episode_number = video_internal_id and next((
             idx for idx, v in enumerate(json_bangumi_list.get('items') or [], 1)
yt_dlp/extractor/adn.py
@@ -28,30 +28,34 @@ from ..utils import (
 
 
 class ADNIE(InfoExtractor):
-    IE_DESC = 'Anime Digital Network'
-    _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
-    _TEST = {
-        'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
-        'md5': '0319c99885ff5547565cacb4f3f9348d',
+    IE_DESC = 'Animation Digital Network'
+    _VALID_URL = r'https?://(?:www\.)?(?:animation|anime)digitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://animationdigitalnetwork.fr/video/fruits-basket/9841-episode-1-a-ce-soir',
+        'md5': '1c9ef066ceb302c86f80c2b371615261',
         'info_dict': {
-            'id': '7778',
+            'id': '9841',
             'ext': 'mp4',
-            'title': 'Blue Exorcist - Kyôto Saga - Episode 1',
-            'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',
-            'series': 'Blue Exorcist - Kyôto Saga',
-            'duration': 1467,
-            'release_date': '20170106',
+            'title': 'Fruits Basket - Episode 1',
+            'description': 'md5:14be2f72c3c96809b0ca424b0097d336',
+            'series': 'Fruits Basket',
+            'duration': 1437,
+            'release_date': '20190405',
             'comment_count': int,
             'average_rating': float,
-            'season_number': 2,
-            'episode': 'Début des hostilités',
+            'season_number': 1,
+            'episode': 'À ce soir !',
             'episode_number': 1,
-        }
-    }
+        },
+        'skip': 'Only available in region (FR, ...)',
+    }, {
+        'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
+        'only_matching': True,
+    }]
 
-    _NETRC_MACHINE = 'animedigitalnetwork'
-    _BASE_URL = 'http://animedigitalnetwork.fr'
-    _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'
+    _NETRC_MACHINE = 'animationdigitalnetwork'
+    _BASE = 'animationdigitalnetwork.fr'
+    _API_BASE_URL = 'https://gw.api.' + _BASE + '/'
     _PLAYER_BASE_URL = _API_BASE_URL + 'player/'
     _HEADERS = {}
     _LOGIN_ERR_MESSAGE = 'Unable to log in'
@@ -75,11 +79,11 @@ class ADNIE(InfoExtractor):
         if subtitle_location:
             enc_subtitles = self._download_webpage(
                 subtitle_location, video_id, 'Downloading subtitles data',
-                fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})
+                fatal=False, headers={'Origin': 'https://' + self._BASE})
         if not enc_subtitles:
             return None
 
-        # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
+        # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
         dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
             compat_b64decode(enc_subtitles[24:]),
             binascii.unhexlify(self._K + '7fac1178830cfe0c'),
@@ -164,7 +168,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
         }, data=b'')['token']
 
         links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
-        self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
+        self._K = ''.join(random.choices('0123456789abcdef', k=16))
         message = bytes_to_intlist(json.dumps({
             'k': self._K,
             't': token,
@@ -231,7 +235,6 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
         for f in m3u8_formats:
             f['language'] = 'fr'
         formats.extend(m3u8_formats)
-        self._sort_formats(formats)
 
         video = (self._download_json(
             self._API_BASE_URL + 'video/%s' % video_id, video_id,
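One-line cleanup worth noting in the hunk above: random.choices (in the standard library since Python 3.6) samples with replacement in a single call, so it is a drop-in for the old comprehension that builds the 16-hex-digit half of the subtitle AES key. A quick check of the equivalence:

import random

old_k = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
new_k = ''.join(random.choices('0123456789abcdef', k=16))
assert len(old_k) == len(new_k) == 16
print(new_k)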
yt_dlp/extractor/adobepass.py
@@ -1344,10 +1344,15 @@ MSO_INFO = {
         'username_field': 'username',
         'password_field': 'password',
     },
+    'AlticeOne': {
+        'name': 'Optimum TV',
+        'username_field': 'j_username',
+        'password_field': 'j_password',
+    },
 }
 
 
-class AdobePassIE(InfoExtractor):
+class AdobePassIE(InfoExtractor):  # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
     _SERVICE_PROVIDER_TEMPLATE = 'https://sp.auth.adobe.com/adobe-services/%s'
     _USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:47.0) Gecko/20100101 Firefox/47.0'
     _MVPD_CACHE = 'ap-mvpd'
@@ -1705,7 +1710,7 @@ class AdobePassIE(InfoExtractor):
                     mso_info.get('username_field', 'username'): username,
                     mso_info.get('password_field', 'password'): password
                 }
-                if mso_id == 'Cablevision':
+                if mso_id in ('Cablevision', 'AlticeOne'):
                     form_data['_eventId_proceed'] = ''
                 mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', form_data)
                 if mso_id != 'Rogers':
yt_dlp/extractor/adobetv.py
@@ -70,7 +70,6 @@ class AdobeTVBaseIE(InfoExtractor):
                 })
                 s3_extracted = True
             formats.append(f)
-        self._sort_formats(formats)
 
         return {
             'id': video_id,
@@ -232,6 +231,7 @@ class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
 class AdobeTVVideoIE(AdobeTVBaseIE):
     IE_NAME = 'adobetv:video'
     _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
+    _EMBED_REGEX = [r'<iframe[^>]+src=[\'"](?P<url>(?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]']
 
     _TEST = {
         # From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
@@ -268,7 +268,6 @@ class AdobeTVVideoIE(AdobeTVBaseIE):
                 'width': int_or_none(source.get('width') or None),
                 'url': source_src,
             })
-        self._sort_formats(formats)
 
         # For both metadata and downloaded files the duration varies among
         # formats. I just pick the max one
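The new _EMBED_REGEX is what lets generic webpage extraction discover Adobe TV players inside third-party pages; the named url group becomes a fresh extraction target. A quick check of the pattern against a synthetic iframe (the URL below is made up for illustration):

import re

_EMBED_REGEX = r'<iframe[^>]+src=[\'"](?P<url>(?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]'
html = '<iframe src="https://video.tv.adobe.com/v/3299/?quality=12"></iframe>'
print(re.search(_EMBED_REGEX, html).group('url'))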
yt_dlp/extractor/adultswim.py
@@ -180,7 +180,6 @@ class AdultSwimIE(TurnerBaseIE):
                     info['subtitles'].setdefault('en', []).append({
                         'url': asset_url,
                     })
-            self._sort_formats(info['formats'])
 
             return info
         else:
yt_dlp/extractor/aenetworks.py
@@ -8,7 +8,7 @@ from ..utils import (
 )
 
 
-class AENetworksBaseIE(ThePlatformIE):
+class AENetworksBaseIE(ThePlatformIE):  # XXX: Do not subclass from concrete IE
     _BASE_URL_REGEX = r'''(?x)https?://
         (?:(?:www|play|watch)\.)?
         (?P<domain>
@@ -28,14 +28,17 @@ class AENetworksBaseIE(ThePlatformIE):
     }
 
     def _extract_aen_smil(self, smil_url, video_id, auth=None):
-        query = {'mbr': 'true'}
+        query = {
+            'mbr': 'true',
+            'formats': 'M3U+none,MPEG-DASH+none,MPEG4,MP3',
+        }
         if auth:
             query['auth'] = auth
         TP_SMIL_QUERY = [{
             'assetTypes': 'high_video_ak',
-            'switch': 'hls_high_ak'
+            'switch': 'hls_high_ak',
         }, {
-            'assetTypes': 'high_video_s3'
+            'assetTypes': 'high_video_s3',
         }, {
             'assetTypes': 'high_video_s3',
             'switch': 'hls_high_fastly',
@@ -59,7 +62,6 @@ class AENetworksBaseIE(ThePlatformIE):
             subtitles = self._merge_subtitles(subtitles, tp_subtitles)
         if last_e and not formats:
             raise last_e
-        self._sort_formats(formats)
         return {
             'id': video_id,
             'formats': formats,
@@ -301,7 +303,6 @@ class HistoryTopicIE(AENetworksBaseIE):
 class HistoryPlayerIE(AENetworksBaseIE):
     IE_NAME = 'history:player'
     _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:history|biography)\.com)/player/(?P<id>\d+)'
-    _TESTS = []
 
     def _real_extract(self, url):
        domain, video_id = self._match_valid_url(url).groups()
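For context on the TP_SMIL_QUERY list the hunk above touches: _extract_aen_smil walks these asset-type queries in order, collects whatever formats each yields, and only re-raises the last error if nothing worked (the raise-last_e logic is visible in the diff itself). A toy model of that fallback loop; the fetch function and its failure condition are invented stand-ins:

queries = [
    {'assetTypes': 'high_video_ak', 'switch': 'hls_high_ak'},
    {'assetTypes': 'high_video_s3'},
    {'assetTypes': 'high_video_s3', 'switch': 'hls_high_fastly'},
]

def fetch_smil(query):
    # stand-in for the ThePlatform SMIL request; pretend only the
    # plain s3 variant has playable formats
    if 'switch' in query:
        raise OSError('no playable formats on this CDN')
    return [{'format_id': query['assetTypes']}]

formats, last_e = [], None
for query in queries:
    try:
        formats.extend(fetch_smil(query))
    except OSError as e:
        last_e = e
if last_e and not formats:
    raise last_e
print(formats)  # the s3 formats collected despite two failed queries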
yt_dlp/extractor/aeonco.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+from .common import InfoExtractor
+from .vimeo import VimeoIE
+
+
+class AeonCoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?aeon\.co/videos/(?P<id>[^/?]+)'
+    _TESTS = [{
+        'url': 'https://aeon.co/videos/raw-solar-storm-footage-is-the-punk-rock-antidote-to-sleek-james-webb-imagery',
+        'md5': 'e5884d80552c9b6ea8d268a258753362',
+        'info_dict': {
+            'id': '1284717',
+            'ext': 'mp4',
+            'title': 'Brilliant Noise',
+            'thumbnail': 'https://i.vimeocdn.com/video/21006315-1a1e49da8b07fd908384a982b4ba9ff0268c509a474576ebdf7b1392f4acae3b-d_960',
+            'uploader': 'Semiconductor',
+            'uploader_id': 'semiconductor',
+            'uploader_url': 'https://vimeo.com/semiconductor',
+            'duration': 348
+        }
+    }, {
+        'url': 'https://aeon.co/videos/dazzling-timelapse-shows-how-microbes-spoil-our-food-and-sometimes-enrich-it',
+        'md5': '4e5f3dad9dbda0dbfa2da41a851e631e',
+        'info_dict': {
+            'id': '728595228',
+            'ext': 'mp4',
+            'title': 'Wrought',
+            'thumbnail': 'https://i.vimeocdn.com/video/1484618528-c91452611f9a4e4497735a533da60d45b2fe472deb0c880f0afaab0cd2efb22a-d_1280',
+            'uploader': 'Biofilm Productions',
+            'uploader_id': 'user140352216',
+            'uploader_url': 'https://vimeo.com/user140352216',
+            'duration': 1344
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        vimeo_id = self._search_regex(r'hosterId":\s*"(?P<id>[0-9]+)', webpage, 'vimeo id')
+        vimeo_url = VimeoIE._smuggle_referrer(f'https://player.vimeo.com/video/{vimeo_id}', 'https://aeon.co')
+        return self.url_result(vimeo_url, VimeoIE)
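The new extractor is a thin wrapper: it finds the Vimeo id in the page and hands off to VimeoIE, using _smuggle_referrer so the Vimeo extractor knows to send an aeon.co Referer. The smuggling idiom itself serializes extra data into the URL and recovers it on the other side; smuggle_url and unsmuggle_url are real yt_dlp.utils helpers, though the exact payload shape _smuggle_referrer packs is an assumption in this sketch:

from yt_dlp.utils import smuggle_url, unsmuggle_url

smuggled = smuggle_url('https://player.vimeo.com/video/1284717',
                       {'referer': 'https://aeon.co'})
url, data = unsmuggle_url(smuggled)
print(url)   # the clean player URL
print(data)  # the carried payload, e.g. {'referer': 'https://aeon.co'}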
yt_dlp/extractor/afreecatv.py
@@ -338,7 +338,6 @@ class AfreecaTVIE(InfoExtractor):
                 }]
                 if not formats and not self.get_param('ignore_no_formats'):
                     continue
-                self._sort_formats(formats)
                 file_info = common_entry.copy()
                 file_info.update({
                     'id': format_id,
@@ -380,7 +379,7 @@ class AfreecaTVIE(InfoExtractor):
         return info
 
 
-class AfreecaTVLiveIE(AfreecaTVIE):
+class AfreecaTVLiveIE(AfreecaTVIE):  # XXX: Do not subclass from concrete IE
 
     IE_NAME = 'afreecatv:live'
     _VALID_URL = r'https?://play\.afreeca(?:tv)?\.com/(?P<id>[^/]+)(?:/(?P<bno>\d+))?'
@@ -464,8 +463,6 @@ class AfreecaTVLiveIE(AfreecaTVIE):
                     'quality': quality_key(quality_str),
                 })
 
-        self._sort_formats(formats)
-
         station_info = self._download_json(
             'https://st.afreecatv.com/api/get_station_status.php', broadcast_no,
             query={'szBjId': broadcaster_id}, fatal=False,
yt_dlp/extractor/agora.py (new file, 251 lines)
@@ -0,0 +1,251 @@
+import functools
+import uuid
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    OnDemandPagedList,
+    int_or_none,
+    month_by_name,
+    parse_duration,
+    try_call,
+)
+
+
+class WyborczaVideoIE(InfoExtractor):
+    # this id is not an article id, it has to be extracted from the article
+    _VALID_URL = r'(?:wyborcza:video:|https?://wyborcza\.pl/(?:api-)?video/)(?P<id>\d+)'
+    IE_NAME = 'wyborcza:video'
+    _TESTS = [{
+        'url': 'wyborcza:video:26207634',
+        'info_dict': {
+            'id': '26207634',
+            'ext': 'mp4',
+            'title': '- Polska w 2020 r. jest innym państwem niż w 2015 r. Nie zmieniła się konstytucja, ale jest to już inny ustrój - mówi Adam Bodnar',
+            'description': ' ',
+            'uploader': 'Dorota Roman',
+            'duration': 2474,
+            'thumbnail': r're:https://.+\.jpg',
+        },
+    }, {
+        'url': 'https://wyborcza.pl/video/26207634',
+        'only_matching': True,
+    }, {
+        'url': 'https://wyborcza.pl/api-video/26207634',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        meta = self._download_json(f'https://wyborcza.pl/api-video/{video_id}', video_id)
+
+        formats = []
+        base_url = meta['redirector'].replace('http://', 'https://') + meta['basePath']
+        for quality in ('standard', 'high'):
+            if not meta['files'].get(quality):
+                continue
+            formats.append({
+                'url': base_url + meta['files'][quality],
+                'height': int_or_none(
+                    self._search_regex(
+                        r'p(\d+)[a-z]+\.mp4$', meta['files'][quality],
+                        'mp4 video height', default=None)),
+                'format_id': quality,
+            })
+        if meta['files'].get('dash'):
+            formats.extend(self._extract_mpd_formats(base_url + meta['files']['dash'], video_id))
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': meta.get('title'),
+            'description': meta.get('lead'),
+            'uploader': meta.get('signature'),
+            'thumbnail': meta.get('imageUrl'),
+            'duration': meta.get('duration'),
+        }
+
+
+class WyborczaPodcastIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+        https?://(?:www\.)?(?:
+            wyborcza\.pl/podcast(?:/0,172673\.html)?|
+            wysokieobcasy\.pl/wysokie-obcasy/0,176631\.html
+        )(?:\?(?:[^&#]+?&)*podcast=(?P<id>\d+))?
+    '''
+    _TESTS = [{
+        'url': 'https://wyborcza.pl/podcast/0,172673.html?podcast=100720#S.main_topic-K.C-B.6-L.1.podcast',
+        'info_dict': {
+            'id': '100720',
+            'ext': 'mp3',
+            'title': 'Cyfrodziewczyny. Kim były pionierki polskiej informatyki ',
+            'uploader': 'Michał Nogaś ',
+            'upload_date': '20210117',
+            'description': 'md5:49f0a06ffc4c1931210d3ab1416a651d',
+            'duration': 3684.0,
+            'thumbnail': r're:https://.+\.jpg',
+        },
+    }, {
+        'url': 'https://www.wysokieobcasy.pl/wysokie-obcasy/0,176631.html?podcast=100673',
+        'info_dict': {
+            'id': '100673',
+            'ext': 'mp3',
+            'title': 'Czym jest ubóstwo menstruacyjne i dlaczego dotyczy każdej i każdego z nas?',
+            'uploader': 'Agnieszka Urazińska ',
+            'upload_date': '20210115',
+            'description': 'md5:c161dc035f8dbb60077011fc41274899',
+            'duration': 1803.0,
+            'thumbnail': r're:https://.+\.jpg',
+        },
+    }, {
+        'url': 'https://wyborcza.pl/podcast',
+        'info_dict': {
+            'id': '334',
+            'title': 'Gościnnie: Wyborcza, 8:10',
+            'series': 'Gościnnie: Wyborcza, 8:10',
+        },
+        'playlist_mincount': 370,
+    }, {
+        'url': 'https://www.wysokieobcasy.pl/wysokie-obcasy/0,176631.html',
+        'info_dict': {
+            'id': '395',
+            'title': 'Gościnnie: Wysokie Obcasy',
+            'series': 'Gościnnie: Wysokie Obcasy',
+        },
+        'playlist_mincount': 12,
+    }]
+
+    def _real_extract(self, url):
+        podcast_id = self._match_id(url)
+
+        if not podcast_id:  # playlist
+            podcast_id = '395' if 'wysokieobcasy.pl/' in url else '334'
+            return self.url_result(TokFMAuditionIE._create_url(podcast_id), TokFMAuditionIE, podcast_id)
+
+        meta = self._download_json('https://wyborcza.pl/api/podcast', podcast_id,
+                                   query={'guid': podcast_id, 'type': 'wo' if 'wysokieobcasy.pl/' in url else None})
+
+        day, month, year = self._search_regex(r'^(\d\d?) (\w+) (\d{4})$', meta.get('publishedDate'),
+                                              'upload date', group=(1, 2, 3), default=(None, None, None))
+        return {
+            'id': podcast_id,
+            'url': meta['url'],
+            'title': meta.get('title'),
+            'description': meta.get('description'),
+            'thumbnail': meta.get('imageUrl'),
+            'duration': parse_duration(meta.get('duration')),
+            'uploader': meta.get('author'),
+            'upload_date': try_call(lambda: f'{year}{month_by_name(month, lang="pl"):0>2}{day:0>2}'),
+        }
+
+
+class TokFMPodcastIE(InfoExtractor):
+    _VALID_URL = r'(?:https?://audycje\.tokfm\.pl/podcast/|tokfm:podcast:)(?P<id>\d+),?'
+    IE_NAME = 'tokfm:podcast'
+    _TESTS = [{
+        'url': 'https://audycje.tokfm.pl/podcast/91275,-Systemowy-rasizm-Czy-zamieszki-w-USA-po-morderstwie-w-Minneapolis-doprowadza-do-zmian-w-sluzbach-panstwowych',
+        'info_dict': {
+            'id': '91275',
+            'ext': 'aac',
+            'title': 'md5:a9b15488009065556900169fb8061cce',
+            'episode': 'md5:a9b15488009065556900169fb8061cce',
+            'series': 'Analizy',
+        },
+    }]
+
+    def _real_extract(self, url):
+        media_id = self._match_id(url)
+
+        # in case it breaks see this but it returns a lot of useless data
+        # https://api.podcast.radioagora.pl/api4/getPodcasts?podcast_id=100091&with_guests=true&with_leaders_for_mobile=true
+        metadata = self._download_json(
+            f'https://audycje.tokfm.pl/getp/3{media_id}', media_id, 'Downloading podcast metadata')
+        if not metadata:
+            raise ExtractorError('No such podcast', expected=True)
+        metadata = metadata[0]
+
+        formats = []
+        for ext in ('aac', 'mp3'):
+            url_data = self._download_json(
+                f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}',
+                media_id, 'Downloading podcast %s URL' % ext)
+            # prevents inserting the mp3 (default) multiple times
+            if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']:
+                formats.append({
+                    'url': url_data['link_ssl'],
+                    'ext': ext,
+                    'vcodec': 'none',
+                    'acodec': ext,
+                })
+
+        return {
+            'id': media_id,
+            'formats': formats,
+            'title': metadata.get('podcast_name'),
+            'series': metadata.get('series_name'),
+            'episode': metadata.get('podcast_name'),
+        }
+
+
+class TokFMAuditionIE(InfoExtractor):
+    _VALID_URL = r'(?:https?://audycje\.tokfm\.pl/audycja/|tokfm:audition:)(?P<id>\d+),?'
+    IE_NAME = 'tokfm:audition'
+    _TESTS = [{
+        'url': 'https://audycje.tokfm.pl/audycja/218,Analizy',
+        'info_dict': {
+            'id': '218',
+            'title': 'Analizy',
+            'series': 'Analizy',
+        },
+        'playlist_count': 1635,
+    }]
+
+    _PAGE_SIZE = 30
+    _HEADERS = {
+        'User-Agent': 'Mozilla/5.0 (Linux; Android 9; Redmi 3S Build/PQ3A.190801.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.101 Mobile Safari/537.36',
+    }
+
+    @staticmethod
+    def _create_url(id):
+        return f'https://audycje.tokfm.pl/audycja/{id}'
+
+    def _real_extract(self, url):
+        audition_id = self._match_id(url)
+
+        data = self._download_json(
+            f'https://api.podcast.radioagora.pl/api4/getSeries?series_id={audition_id}',
+            audition_id, 'Downloading audition metadata', headers=self._HEADERS)
+        if not data:
+            raise ExtractorError('No such audition', expected=True)
+        data = data[0]
+
+        entries = OnDemandPagedList(functools.partial(
+            self._fetch_page, audition_id, data), self._PAGE_SIZE)
+
+        return {
+            '_type': 'playlist',
+            'id': audition_id,
+            'title': data.get('series_name'),
+            'series': data.get('series_name'),
+            'entries': entries,
+        }
+
+    def _fetch_page(self, audition_id, data, page):
+        for retry in self.RetryManager():
+            podcast_page = self._download_json(
+                f'https://api.podcast.radioagora.pl/api4/getPodcasts?series_id={audition_id}&limit=30&offset={page}&with_guests=true&with_leaders_for_mobile=true',
+                audition_id, f'Downloading podcast list page {page + 1}', headers=self._HEADERS)
+            if not podcast_page:
+                retry.error = ExtractorError('Agora returned empty page', expected=True)
+
+        for podcast in podcast_page:
+            yield {
+                '_type': 'url_transparent',
+                'url': podcast['podcast_sharing_url'],
+                'ie_key': TokFMPodcastIE.ie_key(),
+                'title': podcast.get('podcast_name'),
+                'episode': podcast.get('podcast_name'),
+                'description': podcast.get('podcast_description'),
+                'timestamp': int_or_none(podcast.get('podcast_timestamp')),
+                'series': data.get('series_name'),
+            }
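TokFMAuditionIE here and AbemaTVTitleIE earlier in this diff lean on the same utility: OnDemandPagedList wraps a page-fetching callable so entries are pulled lazily, which matters when a user asks for --playlist-items 1-5 of a 1600-episode audition. A self-contained sketch of the pattern (the catalogue list is a stand-in for remote podcast entries):

import functools
from yt_dlp.utils import OnDemandPagedList

PAGE_SIZE = 30

def fetch_page(items, page):
    # called with a zero-based page number; only this slice is produced
    start = page * PAGE_SIZE
    yield from items[start:start + PAGE_SIZE]

catalogue = list(range(95))  # stand-in for podcast entries
entries = OnDemandPagedList(functools.partial(fetch_page, catalogue), PAGE_SIZE)
print(entries.getslice(0, 5))  # only the first page is ever fetched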
Some files were not shown because too many files have changed in this diff.