# yt_dlp/extractor/abematv.py
import base64
import binascii
import functools
import hashlib
import hmac
import io
import json
import re
import time
import urllib.parse
import uuid

from .common import InfoExtractor
from ..aes import aes_ecb_decrypt
from ..networking import RequestHandler, Response
from ..networking.exceptions import TransportError
from ..utils import (
    ExtractorError,
    OnDemandPagedList,
    decode_base_n,
    int_or_none,
    time_seconds,
    traverse_obj,
    update_url_query,
)
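

# Request handler for the custom 'abematv-license' URL scheme (registered in
# AbemaTVBaseIE._get_device_token). The HLS manifests reference their AES-128
# keys through abematv-license:// URIs, so key requests for those URIs are
# answered locally with the decrypted key bytes instead of going over the network.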
class AbemaLicenseRH(RequestHandler):
    _SUPPORTED_URL_SCHEMES = ('abematv-license',)
    _SUPPORTED_PROXY_SCHEMES = None
    _SUPPORTED_FEATURES = None
    RH_NAME = 'abematv_license'

    _STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    _HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'

    def __init__(self, *, ie: 'AbemaTVIE', **kwargs):
        super().__init__(**kwargs)
        self.ie = ie

    def _send(self, request):
        url = request.url
        ticket = urllib.parse.urlparse(url).netloc

        try:
            response_data = self._get_videokey_from_ticket(ticket)
        except ExtractorError as e:
            raise TransportError(cause=e.cause) from e
        except (IndexError, KeyError, TypeError) as e:
            raise TransportError(cause=repr(e)) from e

        return Response(
            io.BytesIO(response_data), url,
            headers={'Content-Length': str(len(response_data))})

    def _get_videokey_from_ticket(self, ticket):
        to_show = self.ie.get_param('verbose', False)
        media_token = self.ie._get_media_token(to_show=to_show)

        license_response = self.ie._download_json(
            'https://license.abema.io/abematv-hls', None, note='Requesting playback license' if to_show else False,
            query={'t': media_token},
            data=json.dumps({
                'kv': 'a',
                'lt': ticket,
            }).encode(),
            headers={
                'Content-Type': 'application/json',
            })
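
        # Key derivation as read from the code below (not an official spec): the
        # license response carries the encrypted video key in 'k' (encoded with the
        # custom _STRTABLE alphabet) and a content id 'cid'; the AES key is
        # HMAC-SHA256(_HKEY, cid + device id), and AES-ECB-decrypting 'k' with it
        # yields the 16-byte key handed back to the downloader.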
        res = decode_base_n(license_response['k'], table=self._STRTABLE)
        encvideokey = list(res.to_bytes(16, 'big'))

        h = hmac.new(
            binascii.unhexlify(self._HKEY),
            (license_response['cid'] + self.ie._DEVICE_ID).encode(),
            digestmod=hashlib.sha256)
        enckey = list(h.digest())

        return bytes(aes_ecb_decrypt(encvideokey, enckey))


class AbemaTVBaseIE(InfoExtractor):
    _NETRC_MACHINE = 'abematv'

    _USERTOKEN = None
    _DEVICE_ID = None
    _MEDIATOKEN = None

    _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'

    @classmethod
    def _generate_aks(cls, deviceid):
        deviceid = deviceid.encode()
        # add 1 hour and then drop minute and secs
        ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
        time_struct = time.gmtime(ts_1hour)
        ts_1hour_str = str(ts_1hour).encode()
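
        # The helpers below repeatedly fold inputs into `tmp` via HMAC-SHA256 keyed
        # with _SECRETKEY: mix_once() replaces tmp with a fresh digest, mix_tmp()
        # re-digests tmp `count` times, and mix_twist() mixes in the urlsafe-base64
        # of tmp concatenated with a nonce. The final tmp is returned as the
        # applicationKeySecret sent when registering a device.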
        tmp = None

        def mix_once(nonce):
            nonlocal tmp
            h = hmac.new(cls._SECRETKEY, digestmod=hashlib.sha256)
            h.update(nonce)
            tmp = h.digest()

        def mix_tmp(count):
            nonlocal tmp
            for _ in range(count):
                mix_once(tmp)

        def mix_twist(nonce):
            nonlocal tmp
            mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)

        mix_once(cls._SECRETKEY)
        mix_tmp(time_struct.tm_mon)
        mix_twist(deviceid)
        mix_tmp(time_struct.tm_mday % 5)
        mix_twist(ts_1hour_str)
        mix_tmp(time_struct.tm_hour % 5)

        return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')

    def _get_device_token(self):
        if self._USERTOKEN:
            return self._USERTOKEN

        self._downloader._request_director.add_handler(AbemaLicenseRH(ie=self, logger=None))

        username, _ = self._get_login_info()
        auth_cache = username and self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19')
        AbemaTVBaseIE._USERTOKEN = auth_cache and auth_cache.get('usertoken')
        if AbemaTVBaseIE._USERTOKEN:
            # try authentication with locally stored token
            try:
                AbemaTVBaseIE._DEVICE_ID = auth_cache.get('device_id')
                self._get_media_token(True)
                return
            except ExtractorError as e:
                self.report_warning(f'Failed to login with cached user token; obtaining a fresh one ({e})')

        AbemaTVBaseIE._DEVICE_ID = str(uuid.uuid4())
        aks = self._generate_aks(self._DEVICE_ID)
        user_data = self._download_json(
            'https://api.abema.io/v1/users', None, note='Authorizing',
            data=json.dumps({
                'deviceId': self._DEVICE_ID,
                'applicationKeySecret': aks,
            }).encode(),
            headers={
                'Content-Type': 'application/json',
            })
        AbemaTVBaseIE._USERTOKEN = user_data['token']

        return self._USERTOKEN
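
    # The media token is what the license server accepts (the 't' query parameter
    # in AbemaLicenseRH above); it is obtained with the device token and cached
    # until explicitly invalidated.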
    def _get_media_token(self, invalidate=False, to_show=True):
        if not invalidate and self._MEDIATOKEN:
            return self._MEDIATOKEN

        AbemaTVBaseIE._MEDIATOKEN = self._download_json(
            'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
            query={
                'osName': 'android',
                'osVersion': '6.0.1',
                'osLang': 'ja_JP',
                'osTimezone': 'Asia/Tokyo',
                'appId': 'tv.abema',
                'appVersion': '3.27.1',
            }, headers={
                'Authorization': f'bearer {self._get_device_token()}',
            })['token']

        return self._MEDIATOKEN

    def _perform_login(self, username, password):
        self._get_device_token()
        if self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19') and self._get_media_token():
            self.write_debug('Skipping logging in')
            return

        if '@' in username:  # don't strictly check if it's email address or not
            ep, method = 'user/email', 'email'
        else:
            ep, method = 'oneTimePassword', 'userId'

        login_response = self._download_json(
            f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
            data=json.dumps({
                method: username,
                'password': password,
            }).encode(), headers={
                'Authorization': f'bearer {self._get_device_token()}',
                'Origin': 'https://abema.tv',
                'Referer': 'https://abema.tv/',
                'Content-Type': 'application/json',
            })

        AbemaTVBaseIE._USERTOKEN = login_response['token']
        self._get_media_token(True)
        auth_cache = {
            'device_id': AbemaTVBaseIE._DEVICE_ID,
            'usertoken': AbemaTVBaseIE._USERTOKEN,
        }
        self.cache.store(self._NETRC_MACHINE, username, auth_cache)

    def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'):
        return self._download_json(
            f'https://api.abema.io/{endpoint}', video_id, query=query or {},
            note=note,
            headers={
                'Authorization': f'bearer {self._get_device_token()}',
            })

    def _extract_breadcrumb_list(self, webpage, video_id):
        for jld in re.finditer(
                r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
                webpage):
            jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
            if traverse_obj(jsonld, '@type') != 'BreadcrumbList':
                continue
            items = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
            if items:
                return items
        return []


class AbemaTVIE(AbemaTVBaseIE):
    _VALID_URL = r'https?://abema\.tv/(?P<type>now-on-air|video/episode|channels/.+?/slots)/(?P<id>[^?/]+)'
    _TESTS = [{
        'url': 'https://abema.tv/video/episode/194-25_s2_p1',
        'info_dict': {
            'id': '194-25_s2_p1',
            'title': '1話 「チーズケーキ」 「モーニング再び」',
            'series': '異世界食堂2',
            'season': 'シーズン2',
            'season_number': 2,
            'episode': '1話 「チーズケーキ」 「モーニング再び」',
            'episode_number': 1,
        },
        'skip': 'expired',
    }, {
        'url': 'https://abema.tv/channels/anime-live2/slots/E8tvAnMJ7a9a5d',
        'info_dict': {
            'id': 'E8tvAnMJ7a9a5d',
            'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
            'series': 'ゆるキャン△ SEASON2',
            'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
            'season_number': 2,
            'episode_number': 1,
            'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
        },
        'skip': 'expired',
    }, {
        'url': 'https://abema.tv/video/episode/87-877_s1282_p31047',
        'info_dict': {
            'id': '87-877_s1282_p31047',
            'title': '5話『光射す』',
            'description': 'md5:56d4fc1b4f7769ded5f923c55bb4695d',
            'thumbnail': r're:https://hayabusa\.io/.+',
            'series': '相棒',
            'episode': '5話『光射す』',
        },
        'skip': 'expired',
    }, {
        'url': 'https://abema.tv/now-on-air/abema-anime',
        'info_dict': {
            'id': 'abema-anime',
            # this varies
            # 'title': '女子高生の無駄づかい 全話一挙【無料ビデオ72時間】',
            'description': 'md5:55f2e61f46a17e9230802d7bcc913d5f',
            'is_live': True,
        },
        'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server',
    }]
    _TIMETABLE = None

    def _real_extract(self, url):
        # Starting a download from an infojson produced by this extractor is undefined
        # behavior and will never be fixed; you must trigger downloads by specifying the
        # URL directly (unless a way to hook in before downloading from the extractor is added).
        video_id, video_type = self._match_valid_url(url).group('id', 'type')
        headers = {
            'Authorization': 'Bearer ' + self._get_device_token(),
        }
        video_type = video_type.split('/')[-1]

        webpage = self._download_webpage(url, video_id)
        canonical_url = self._search_regex(
            r'<link\s+rel="canonical"\s*href="(.+?)"', webpage, 'canonical URL',
            default=url)
        info = self._search_json_ld(webpage, video_id, default={})

        title = self._search_regex(
            r'<span\s*class=".+?EpisodeTitleBlock__title">(.+?)</span>', webpage, 'title', default=None)
        if not title:
            jsonld = None
            for jld in re.finditer(
                    r'(?is)<span\s*class="com-m-Thumbnail__image">(?:</span>)?<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
                    webpage):
                jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
                if jsonld:
                    break
            if jsonld:
                title = jsonld.get('caption')
        if not title and video_type == 'now-on-air':
            if not self._TIMETABLE:
                # cache the timetable because it goes to 5MiB in size (!!)
                self._TIMETABLE = self._download_json(
                    'https://api.abema.io/v1/timetable/dataSet?debug=false', video_id,
                    headers=headers)
            now = time_seconds(hours=9)
            for slot in self._TIMETABLE.get('slots', []):
                if slot.get('channelId') != video_id:
                    continue
                if slot['startAt'] <= now and now < slot['endAt']:
                    title = slot['title']
                    break

        # read breadcrumb on top of page
        breadcrumb = self._extract_breadcrumb_list(webpage, video_id)
        if breadcrumb:
            # breadcrumb list translates to: (e.g. 1st test for this IE)
            # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title)
            # hence this works
            info['series'] = breadcrumb[-2]
            info['episode'] = breadcrumb[-1]
            if not title:
                title = info['episode']

        description = self._html_search_regex(
            (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div'),
            webpage, 'description', default=None, group=1)
        if not description:
            og_desc = self._html_search_meta(
                ('description', 'og:description', 'twitter:description'), webpage)
            if og_desc:
                description = re.sub(r'''(?sx)
                    ^(.+?)(?:
                        アニメの動画を無料で見るならABEMA!| # anime
                        等、.+ # applies for most of categories
                    )?
                ''', r'\1', og_desc)

        # canonical URL may contain season and episode number
        mobj = re.search(r's(\d+)_p(\d+)$', canonical_url)
        if mobj:
            seri = int_or_none(mobj.group(1), default=float('inf'))
            epis = int_or_none(mobj.group(2), default=float('inf'))
            info['season_number'] = seri if seri < 100 else None
            # some anime like Detective Conan (though not available in AbemaTV)
            # has more than 1000 episodes (1026 as of 2021/11/15)
            info['episode_number'] = epis if epis < 2000 else None

        is_live, m3u8_url = False, None
        availability = 'public'
        if video_type == 'now-on-air':
            is_live = True
            channel_url = 'https://api.abema.io/v1/channels'
            if video_id == 'news-global':
                channel_url = update_url_query(channel_url, {'division': '1'})
            onair_channels = self._download_json(channel_url, video_id)
            for ch in onair_channels['channels']:
                if video_id == ch['id']:
                    m3u8_url = ch['playback']['hls']
                    break
            else:
                raise ExtractorError(f'Cannot find on-air {video_id} channel.', expected=True)
        elif video_type == 'episode':
            api_response = self._download_json(
                f'https://api.abema.io/v1/video/programs/{video_id}', video_id,
                note='Checking playability',
                headers=headers)
            if not traverse_obj(api_response, ('label', 'free', {bool})):
                # cannot acquire decryption key for these streams
                self.report_warning('This is a premium-only stream')
                availability = 'premium_only'
            info.update(traverse_obj(api_response, {
                'series': ('series', 'title'),
                'season': ('season', 'name'),
                'season_number': ('season', 'sequence'),
                'episode_number': ('episode', 'number'),
            }))
            if not title:
                title = traverse_obj(api_response, ('episode', 'title'))
            if not description:
                description = traverse_obj(api_response, ('episode', 'content'))

            m3u8_url = f'https://vod-abematv.akamaized.net/program/{video_id}/playlist.m3u8'
        elif video_type == 'slots':
            api_response = self._download_json(
                f'https://api.abema.io/v1/media/slots/{video_id}', video_id,
                note='Checking playability',
                headers=headers)
            if not traverse_obj(api_response, ('slot', 'flags', 'timeshiftFree'), default=False):
                self.report_warning('This is a premium-only stream')
                availability = 'premium_only'

            m3u8_url = f'https://vod-abematv.akamaized.net/slot/{video_id}/playlist.m3u8'
        else:
            raise ExtractorError('Unreachable')

        if is_live:
            self.report_warning("This is a livestream; yt-dlp doesn't support downloading natively, but FFmpeg cannot handle m3u8 manifests from AbemaTV")
            self.report_warning('Please consider using Streamlink to download these streams (https://github.com/streamlink/streamlink)')
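        # The key URIs inside this manifest use the abematv-license:// scheme (see
        # AbemaLicenseRH above), so decryption keys are produced locally by the request
        # handler registered in _get_device_token() rather than fetched from a plain URL.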
        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, ext='mp4', live=is_live)

        info.update({
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            'is_live': is_live,
            'availability': availability,
        })
        return info


class AbemaTVTitleIE(AbemaTVBaseIE):
    _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'
    _PAGE_SIZE = 25

    _TESTS = [{
        'url': 'https://abema.tv/video/title/90-1597',
        'info_dict': {
            'id': '90-1597',
            'title': 'シャッフルアイランド',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://abema.tv/video/title/193-132',
        'info_dict': {
            'id': '193-132',
            'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
        },
        'playlist_mincount': 16,
    }, {
        'url': 'https://abema.tv/video/title/25-102',
        'info_dict': {
            'id': '25-102',
            'title': 'ソードアート・オンライン アリシゼーション',
        },
        'playlist_mincount': 24,
    }]

    def _fetch_page(self, playlist_id, series_version, page):
        programs = self._call_api(
            f'v1/video/series/{playlist_id}/programs', playlist_id,
            note=f'Downloading page {page + 1}',
            query={
                'seriesVersion': series_version,
                'offset': str(page * self._PAGE_SIZE),
                'order': 'seq',
                'limit': str(self._PAGE_SIZE),
            })
        yield from (
            self.url_result(f'https://abema.tv/video/episode/{x}')
            for x in traverse_obj(programs, ('programs', ..., 'id')))

    def _entries(self, playlist_id, series_version):
        return OnDemandPagedList(
            functools.partial(self._fetch_page, playlist_id, series_version),
            self._PAGE_SIZE)
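
    # OnDemandPagedList only invokes _fetch_page for the pages that are actually
    # needed, so options like --playlist-items avoid downloading the whole series
    # listing. The 'version' returned by the series endpoint is echoed back as
    # 'seriesVersion', presumably to keep pagination consistent across pages.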
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id)

        return self.playlist_result(
            self._entries(playlist_id, series_info['version']), playlist_id=playlist_id,
            playlist_title=series_info.get('title'),
            playlist_description=series_info.get('content'))
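

# Minimal usage sketch (an assumption about typical embedding via yt-dlp's public
# Python API, not part of this module; the URL is one of the test URLs above and
# may have expired):
#
#     from yt_dlp import YoutubeDL
#
#     with YoutubeDL({'extract_flat': True}) as ydl:
#         info = ydl.extract_info('https://abema.tv/video/title/25-102', download=False)
#         print(info['title'], sum(1 for _ in info['entries']))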