diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index 34c98b537..14229e009 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -201,7 +201,6 @@
     BanByeChannelIE,
     BanByeIE,
 )
-from .bandaichannel import BandaiChannelIE
 from .bandcamp import (
     BandcampAlbumIE,
     BandcampIE,
@@ -934,7 +933,6 @@
     JioSaavnSongIE,
 )
 from .joj import JojIE
-from .joqrag import JoqrAgIE
 from .jove import JoveIE
 from .jstream import JStreamIE
 from .jtbc import (
diff --git a/yt_dlp/extractor/bandaichannel.py b/yt_dlp/extractor/bandaichannel.py
deleted file mode 100644
index d7fcf44bd..000000000
--- a/yt_dlp/extractor/bandaichannel.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from .brightcove import BrightcoveNewBaseIE
-from ..utils import extract_attributes
-
-
-class BandaiChannelIE(BrightcoveNewBaseIE):
-    IE_NAME = 'bandaichannel'
-    _VALID_URL = r'https?://(?:www\.)?b-ch\.com/titles/(?P<id>\d+/\d+)'
-    _TESTS = [{
-        'url': 'https://www.b-ch.com/titles/514/001',
-        'md5': 'a0f2d787baa5729bed71108257f613a4',
-        'info_dict': {
-            'id': '6128044564001',
-            'ext': 'mp4',
-            'title': 'メタルファイターMIKU 第1話',
-            'timestamp': 1580354056,
-            'uploader_id': '5797077852001',
-            'upload_date': '20200130',
-            'duration': 1387.733,
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        attrs = extract_attributes(self._search_regex(
-            r'(<video-js[^>]+\bid="bcplayer"[^>]*>)', webpage, 'player'))
-        bc = self._download_json(
-            'https://pbifcd.b-ch.com/v1/playbackinfo/ST/70/' + attrs['data-info'],
-            video_id, headers={'X-API-KEY': attrs['data-auth'].strip()})['bc']
-        return self._parse_brightcove_metadata(bc, bc['id'])
diff --git a/yt_dlp/extractor/joqrag.py b/yt_dlp/extractor/joqrag.py
deleted file mode 100644
index 7a91d4a23..000000000
--- a/yt_dlp/extractor/joqrag.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import datetime as dt
-import urllib.parse
-
-from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    datetime_from_str,
-    unified_timestamp,
-    urljoin,
-)
-
-
-class JoqrAgIE(InfoExtractor):
-    IE_DESC = '超!A&G+ 文化放送 (f.k.a. AGQR) Nippon Cultural Broadcasting, Inc. (JOQR)'
-    _VALID_URL = [r'https?://www\.uniqueradio\.jp/agplayer5/(?:player|inc-player-hls)\.php',
-                  r'https?://(?:www\.)?joqr\.co\.jp/ag/',
-                  r'https?://(?:www\.)?joqr\.co\.jp/qr/ag(?:daily|regular)program/?(?:$|[#?])']
-    _TESTS = [{
-        'url': 'https://www.uniqueradio.jp/agplayer5/player.php',
-        'info_dict': {
-            'id': 'live',
-            'title': str,
-            'channel': '超!A&G+',
-            'description': str,
-            'live_status': 'is_live',
-            'release_timestamp': int,
-        },
-        'params': {
-            'skip_download': True,
-            'ignore_no_formats_error': True,
-        },
-    }, {
-        'url': 'https://www.uniqueradio.jp/agplayer5/inc-player-hls.php',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.joqr.co.jp/ag/article/103760/',
-        'only_matching': True,
-    }, {
-        'url': 'http://www.joqr.co.jp/qr/agdailyprogram/',
-        'only_matching': True,
-    }, {
-        'url': 'http://www.joqr.co.jp/qr/agregularprogram/',
-        'only_matching': True,
-    }]
-
-    def _extract_metadata(self, variable, html):
-        return clean_html(urllib.parse.unquote_plus(self._search_regex(
-            rf'var\s+{variable}\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
-            html, 'metadata', group='value', default=''))) or None
-
-    def _extract_start_timestamp(self, video_id, is_live):
-        def extract_start_time_from(date_str):
-            dt_ = datetime_from_str(date_str) + dt.timedelta(hours=9)
-            date = dt_.strftime('%Y%m%d')
-            start_time = self._search_regex(
-                r'<h3[^>]+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+–\s*(\d{1,2}:\d{1,2})',
-                self._download_webpage(
-                    f'https://www.joqr.co.jp/qr/agdailyprogram/?date={date}', video_id,
-                    note=f'Downloading program list of {date}', fatal=False,
-                    errnote=f'Failed to download program list of {date}') or '',
-                'start time', default=None)
-            if start_time:
-                return unified_timestamp(f'{dt_.strftime("%Y/%m/%d")} {start_time} +09:00')
-            return None
-
-        start_timestamp = extract_start_time_from('today')
-        if not start_timestamp:
-            return None
-
-        if not is_live or start_timestamp < datetime_from_str('now').timestamp():
-            return start_timestamp
-        else:
-            return extract_start_time_from('yesterday')
-
-    def _real_extract(self, url):
-        video_id = 'live'
-
-        metadata = self._download_webpage(
-            'https://www.uniqueradio.jp/aandg', video_id,
-            note='Downloading metadata', errnote='Failed to download metadata')
-        title = self._extract_metadata('Program_name', metadata)
-
-        if not title or title == '放送休止':
-            formats = []
-            live_status = 'is_upcoming'
-            release_timestamp = self._extract_start_timestamp(video_id, False)
-            msg = 'This stream is not currently live'
-            if release_timestamp:
-                msg += (' and will start at '
-                        + dt.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
-            self.raise_no_formats(msg, expected=True)
-        else:
-            m3u8_path = self._search_regex(
-                r'<source\s[^>]*\bsrc="([^"]+)"',
-                self._download_webpage(
-                    'https://www.uniqueradio.jp/agplayer5/inc-player-hls.php', video_id,
-                    note='Downloading player data', errnote='Failed to download player data'),
-                'm3u8 url')
-            formats = self._extract_m3u8_formats(
-                urljoin('https://www.uniqueradio.jp/', m3u8_path), video_id)
-            live_status = 'is_live'
-            release_timestamp = self._extract_start_timestamp(video_id, True)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'channel': '超!A&G+',
-            'description': self._extract_metadata('Program_text', metadata),
-            'formats': formats,
-            'live_status': live_status,
-            'release_timestamp': release_timestamp,
-        }
diff --git a/yt_dlp/extractor/unsupported.py b/yt_dlp/extractor/unsupported.py
index 31393b02a..1b7719810 100644
--- a/yt_dlp/extractor/unsupported.py
+++ b/yt_dlp/extractor/unsupported.py
@@ -53,6 +53,7 @@ class KnownDRMIE(UnsupportedInfoExtractor):
         r'(?:beta\.)?crunchyroll\.com',
         r'viki\.com',
         r'deezer\.com',
+        r'b-ch\.com',
     )
 
     _TESTS = [{
@@ -168,6 +169,9 @@ class KnownDRMIE(UnsupportedInfoExtractor):
     }, {
         'url': 'http://www.deezer.com/playlist/176747451',
         'only_matching': True,
+    }, {
+        'url': 'https://www.b-ch.com/titles/8203/001',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):