1 from .common
import InfoExtractor
2 from ..networking
.exceptions
import HTTPError
11 from ..utils
.traversal
import find_element
, traverse_obj
class TBSJPEpisodeIE(InfoExtractor):
    """Extract a single episode from TBS FREE (cu.tbs.co.jp)."""
    _VALID_URL = r'https?://cu\.tbs\.co\.jp/episode/(?P<id>[\d_]+)'
    _TESTS = [{
        'url': 'https://cu.tbs.co.jp/episode/23613_2044134_1000049010',
        'skip': 'streams geo-restricted, Japan only. Also, will likely expire eventually',
        'info_dict': {
            'title': 'VIVANT 第三話 誤送金完結へ!絶体絶命の反撃開始',
            'id': '23613_2044134_1000049010',
            'upload_date': '20230728',
            'release_timestamp': 1691118230,
            'episode': '第三話 誤送金完結へ!絶体絶命の反撃開始',
            'release_date': '20230804',
            'categories': 'count:11',
            'timestamp': 1690522538,
            'description': 'md5:2b796341af1ef772034133174ba4a895',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Episode metadata is embedded in the page as `window.app = {...}`;
        # non-fatal because the title can still be scraped from the HTML below
        meta = self._search_json(r'window\.app\s*=', webpage, 'episode info', video_id, fatal=False)
        episode = traverse_obj(meta, ('falcorCache', 'catalog', 'episode', video_id, 'value'))

        # The playback endpoint and API key live inside a separate JS bundle (/assets/tf.*.js)
        tf_path = self._search_regex(
            r'<script[^>]+src=["\'](/assets/tf\.[^"\']+\.js)["\']', webpage, 'stream API config')
        tf_js = self._download_webpage(urljoin(url, tf_path), video_id, note='Downloading stream API config')
        video_url = self._search_regex(r'videoPlaybackUrl:\s*[\'"]([^\'"]+)[\'"]', tf_js, 'stream API url')
        api_key = self._search_regex(r'api_key:\s*[\'"]([^\'"]+)[\'"]', tf_js, 'stream API key')

        try:
            source_meta = self._download_json(
                f'{video_url}ref:{video_id}', video_id,
                headers={'X-Streaks-Api-Key': api_key},
                note='Downloading stream metadata')
        except ExtractorError as e:
            # The Streaks API answers 403 for requests from outside Japan
            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                self.raise_geo_restricted(countries=['JP'])
            raise

        formats, subtitles = [], {}
        for src in traverse_obj(source_meta, ('sources', ..., 'src')):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(src, video_id, fatal=False)
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)

        return {
            # Page <h3> carries the combined "<series> <episode>" display title
            'title': traverse_obj(webpage, ({find_element(tag='h3')}, {clean_html})),
            'id': video_id,
            **traverse_obj(episode, {
                'categories': ('keywords', {list}),
                'id': ('content_id', {str}),
                'description': ('description', 0, 'value'),
                'timestamp': ('created_at', {unified_timestamp}),
                'release_timestamp': ('pub_date', {unified_timestamp}),
                'duration': ('tv_episode_info', 'duration', {int_or_none}),
                'episode_number': ('tv_episode_info', 'episode_number', {int_or_none}),
                # Titles come in phonetic + display variants; keep the display one
                'episode': ('title', lambda _, v: not v.get('is_phonetic'), 'value'),
                'series': ('custom_data', 'program_name'),
            }),
            'formats': formats,
            'subtitles': subtitles,
        }
class TBSJPProgramIE(InfoExtractor):
    """Extract a TBS FREE programme page as a playlist of its episodes."""
    _VALID_URL = r'https?://cu\.tbs\.co\.jp/program/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://cu.tbs.co.jp/program/23601',
        'playlist_mincount': 4,
        'info_dict': {
            'id': '23601',
            'categories': ['エンタメ', 'ミライカプセル', '会社', '働く', 'バラエティ', '動画'],
            'description': '幼少期の夢は大人になって、どう成長したのだろうか?\nそしてその夢は今後、どのように広がっていくのか?\nいま話題の会社で働く人の「夢の成長」を描く',
            'series': 'ミライカプセル -I have a dream-',
            'title': 'ミライカプセル -I have a dream-',
        },
    }]

    def _real_extract(self, url):
        programme_id = self._match_id(url)
        webpage = self._download_webpage(url, programme_id)

        # Programme metadata is embedded in the page as `window.app = {...}`
        meta = self._search_json(r'window\.app\s*=', webpage, 'programme info', programme_id)
        programme = traverse_obj(meta, ('falcorCache', 'catalog', 'program', programme_id, 'false', 'value'))

        return {
            '_type': 'playlist',
            # Each episode code becomes its own TBSJPEpisodeIE entry
            'entries': [self.url_result(f'https://cu.tbs.co.jp/episode/{video_id}', TBSJPEpisodeIE, video_id)
                        for video_id in traverse_obj(programme, ('custom_data', 'seriesList', 'episodeCode', ...))],
            'id': programme_id,
            **traverse_obj(programme, {
                'categories': ('keywords', ...),
                'id': ('tv_episode_info', 'show_content_id', {str_or_none}),
                'description': ('custom_data', 'program_description'),
                'series': ('custom_data', 'program_name'),
                'title': ('custom_data', 'program_name'),
            }),
        }
class TBSJPPlaylistIE(InfoExtractor):
    """Extract a TBS FREE curated playlist (mixed programmes and episodes)."""
    _VALID_URL = r'https?://cu\.tbs\.co\.jp/playlist/(?P<id>[\da-f]+)'
    _TESTS = [{
        'url': 'https://cu.tbs.co.jp/playlist/184f9970e7ba48e4915f1b252c55015e',
        'playlist_mincount': 4,
        'info_dict': {
            'id': '184f9970e7ba48e4915f1b252c55015e',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        page = self._download_webpage(url, playlist_id)

        # Playlist metadata is embedded in the page as `window.app = {...}`
        meta = self._search_json(r'window\.app\s*=', page, 'playlist info', playlist_id)
        playlist = traverse_obj(meta, ('falcorCache', 'playList', playlist_id))

        def entries():
            # Dispatch each catalog item to the matching extractor by content_type
            for entry in traverse_obj(playlist, ('catalogs', 'value', lambda _, v: v['content_id'])):
                # TODO: it's likely possible to get all metadata from the playlist page json instead
                content_id = entry['content_id']
                content_type = entry.get('content_type')
                if content_type == 'tv_show':
                    yield self.url_result(
                        f'https://cu.tbs.co.jp/program/{content_id}', TBSJPProgramIE, content_id)
                elif content_type == 'tv_episode':
                    yield self.url_result(
                        f'https://cu.tbs.co.jp/episode/{content_id}', TBSJPEpisodeIE, content_id)
                else:
                    self.report_warning(f'Skipping "{content_id}" with unsupported content_type "{content_type}"')

        return self.playlist_result(entries(), playlist_id, traverse_obj(playlist, ('display_name', 'value')))