from .common import InfoExtractor, SearchInfoExtractor
from ..networking import Request
from ..networking.exceptions import HTTPError
class NiconicoIE(InfoExtractor):
    _GEO_COUNTRIES = ['JP']
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': int,  # timestamp is unstable
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        'params': {'skip_download': 'm3u8'},
        # File downloaded with and without credentials are different, so omit
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
            'title': '【Kagamine Rin】Dance on media【Original】take2!',
            'description': 'md5:9368f2b1f4178de64f2602c2f3d6cbf5',
            'thumbnail': r're:https?://.*',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'genres': ['音楽・サウンド'],
            'tags': ['Translation_Request', 'Kagamine_Rin', 'Rin_Original'],
        'params': {'skip_download': 'm3u8'},
        # Video exists but is marked as "deleted"
        'url': 'http://www.nicovideo.jp/watch/sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has different value if logged in
        'skip': 'Requires an account',
        'url': 'http://www.nicovideo.jp/watch/so22543406',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        'skip': 'The viewing period of the video you were searching for has expired.',
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:f95a3d259172667b293530cc2e41ebda',
            'thumbnail': r're:https?://.*',
            'timestamp': 1190835883,
            'upload_date': '20070926',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'comment_count': int,
        'params': {'skip_download': 'm3u8'},
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498481660,
            'upload_date': '20170626',
            'uploader': 'no-namamae',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'comment_count': int,
        'params': {'skip_download': 'm3u8'},
        # Video without owner
        'url': 'http://www.nicovideo.jp/watch/sm18238488',
            'title': '【実写版】ミュータントタートルズ',
            'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
            'timestamp': 1341128008,
            'upload_date': '20120701',
            'thumbnail': r're:https?://.*',
            'comment_count': int,
            'genres': ['エンターテイメント'],
        'params': {'skip_download': 'm3u8'},
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
        'note': 'a video that is only served as an ENCRYPTED HLS.',
        'url': 'https://www.nicovideo.jp/watch/so38016254',
        'only_matching': True,
    _VALID_URL = r'https?://(?:(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch|nico\.ms)/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'

        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0',
        'X-Niconico-Language': 'en-us',
        'Referer': 'https://www.nicovideo.jp/',
        'Origin': 'https://www.nicovideo.jp',
    def _perform_login(self, username, password):
            'mail_tel': username,
            'password': password,
        self._request_webpage(
            'https://account.nicovideo.jp/login', None,
            note='Acquiring Login session')
        page = self._download_webpage(
            'https://account.nicovideo.jp/login/redirector?show_button_twitter=1&site=niconico&show_button_facebook=1', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs),
                'Referer': 'https://account.nicovideo.jp/login',
                'Content-Type': 'application/x-www-form-urlencoded',
        if 'oneTimePw' in page:
            post_url = self._search_regex(
                r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page, 'post url', group='url')
            page = self._download_webpage(
                urljoin('https://account.nicovideo.jp', post_url), None,
                note='Performing MFA', errnote='Unable to complete MFA',
                data=urlencode_postdata({
                    'otp': self._get_tfa_info('6 digits code'),
                    'Content-Type': 'application/x-www-form-urlencoded',
            if 'oneTimePw' in page or 'formError' in page:
                err_msg = self._html_search_regex(
                    r'formError["\']+>(.*?)</div>', page, 'form_error',
                    default='There\'s an error but the message can\'t be parsed.',
                self.report_warning(f'Unable to log in: MFA challenge failed, "{err_msg}"')
        login_ok = 'class="notice error"' not in page
            self.report_warning('Unable to log in: bad username or password')
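
    # The method below implements niconico's DMC (Session API) delivery: a session is
    # negotiated with the Session API and has to be kept alive with periodic heartbeat PUTs;
    # the heartbeat URL, payload and interval returned here are presumably consumed by the
    # matching downloader rather than used directly in this extractor.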
    def _get_heartbeat_info(self, info_dict):
        video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')
        dmc_protocol = info_dict['expected_protocol']

            info_dict.get('_api_data')
                self._html_search_regex(
                    'data-api-data="([^"]+)"',
                    self._download_webpage('https://www.nicovideo.jp/watch/' + video_id, video_id),
                    'API data', default='{}'),

        session_api_data = try_get(api_data, lambda x: x['media']['delivery']['movie']['session'])
        session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0])

        tracking_id = traverse_obj(api_data, ('media', 'delivery', 'trackingId'))
            tracking_url = update_url_query('https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', {'t': tracking_id})
            watch_request_response = self._download_json(
                tracking_url, video_id,
                note='Acquiring permission for downloading video', fatal=False,
                headers=self._API_HEADERS)
            if traverse_obj(watch_request_response, ('meta', 'status')) != 200:
                self.report_warning('Failed to acquire permission for playing video. Video download may fail.')

        yesno = lambda x: 'yes' if x else 'no'

        if dmc_protocol == 'http':
            protocol_parameters = {
                'http_output_download_parameters': {
                    'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
                    'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
        elif dmc_protocol == 'hls':
            segment_duration = try_get(self._configuration_arg('segment_duration'), lambda x: int(x[0])) or 6000
            parsed_token = self._parse_json(session_api_data['token'], video_id)
            encryption = traverse_obj(api_data, ('media', 'delivery', 'encryption'))
            protocol_parameters = {
                    'segment_duration': segment_duration,
                    'transfer_preset': '',
                    'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
                    'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
            if 'hls_encryption' in parsed_token and encryption:
                protocol_parameters['hls_parameters']['encryption'] = {
                    parsed_token['hls_encryption']: {
                        'encrypted_key': encryption['encryptedKey'],
                        'key_uri': encryption['keyUri'],
                protocol = 'm3u8_native'
            raise ExtractorError(f'Unsupported DMC protocol: {dmc_protocol}')

        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for {}'.format(info_dict['format_id']),
                        'player_id': session_api_data.get('playerId'),
                        'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]),
                        'content_key_timeout': session_api_data.get('contentKeyTimeout'),
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data.get('serviceUserId'),
                    'content_id': session_api_data.get('contentId'),
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                                'audio_src_ids': [audio_src_id],
                                'video_src_ids': [video_src_id],
                    'content_type': 'movie',
                            'lifetime': session_api_data.get('heartbeatLifetime'),
                    'priority': session_api_data['priority'],
                                'parameters': protocol_parameters,
                    'recipe_id': session_api_data.get('recipeId'),
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data.get('signature'),
                            'token': session_api_data.get('token'),
                    'timing_constraint': 'unlimited',

        info_dict['url'] = session_response['data']['session']['content_uri']
        info_dict['protocol'] = protocol

        heartbeat_info_dict = {
            'url': session_api_endpoint['url'] + '/' + session_response['data']['session']['id'] + '?_format=json&_method=PUT',
            'data': json.dumps(session_response['data']),
            # interval, convert milliseconds to seconds, then halve to make a buffer.
            'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000),

        return info_dict, heartbeat_info_dict
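
    # DMC formats carry a synthetic 'niconico_dmc:<video_id>/<video_src_id>/<audio_src_id>'
    # URL (built below); _get_heartbeat_info() above splits it apart again to resolve the
    # real content URI at download time.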
    def _extract_format_for_quality(self, video_id, audio_quality, video_quality, dmc_protocol):

        if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):

        format_id = '-'.join(
            [remove_start(s['id'], 'archive_') for s in (video_quality, audio_quality)] + [dmc_protocol])

        vid_qual_label = traverse_obj(video_quality, ('metadata', 'label'))

            'url': 'niconico_dmc:{}/{}/{}'.format(video_id, video_quality['id'], audio_quality['id']),
            'format_id': format_id,
            'format_note': join_nonempty('DMC', vid_qual_label, dmc_protocol.upper(), delim=' '),
            'ext': 'mp4',  # Session API is used in HTML5, which always serves mp4
            **traverse_obj(audio_quality, ('metadata', {
                'abr': ('bitrate', {float_or_none(scale=1000)}),
                'asr': ('samplingRate', {int_or_none}),
            **traverse_obj(video_quality, ('metadata', {
                'vbr': ('bitrate', {float_or_none(scale=1000)}),
                'height': ('resolution', 'height', {int_or_none}),
                'width': ('resolution', 'width', {int_or_none}),
            'quality': -2 if 'low' in video_quality['id'] else None,
            'protocol': 'niconico_dmc',
            'expected_protocol': dmc_protocol,  # XXX: This is not a documented field
                'Origin': 'https://www.nicovideo.jp',
                'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
    def _yield_dmc_formats(self, api_data, video_id):
        dmc_data = traverse_obj(api_data, ('media', 'delivery', 'movie'))
        audios = traverse_obj(dmc_data, ('audios', ..., {dict}))
        videos = traverse_obj(dmc_data, ('videos', ..., {dict}))
        protocols = traverse_obj(dmc_data, ('session', 'protocols', ..., {str}))
        if not all((audios, videos, protocols)):

        for audio_quality, video_quality, protocol in itertools.product(audios, videos, protocols):
            if fmt := self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol):
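
    # DMS ("domand") delivery: an HLS manifest is requested from the access-rights endpoint
    # using the accessRightKey and watchTrackId found in the API data, and formats are
    # derived from that manifest.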
    def _yield_dms_formats(self, api_data, video_id):
        fmt_filter = lambda _, v: v['isAvailable'] and v['id']
        videos = traverse_obj(api_data, ('media', 'domand', 'videos', fmt_filter))
        audios = traverse_obj(api_data, ('media', 'domand', 'audios', fmt_filter))
        access_key = traverse_obj(api_data, ('media', 'domand', 'accessRightKey', {str}))
        track_id = traverse_obj(api_data, ('client', 'watchTrackId', {str}))
        if not all((videos, audios, access_key, track_id)):

        dms_m3u8_url = self._download_json(
            f'https://nvapi.nicovideo.jp/v1/watch/{video_id}/access-rights/hls', video_id,
                'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios))),
            }).encode(), query={'actionTrackId': track_id}, headers={
                'x-access-right-key': access_key,
                'x-frontend-version': 0,
                'x-request-with': 'https://www.nicovideo.jp',
            })['data']['contentUrl']
        # Getting all audio formats results in duplicate video formats which we filter out later
        dms_fmts = self._extract_m3u8_formats(dms_m3u8_url, video_id, 'mp4')

        # m3u8 extraction does not provide audio bitrates, so extract from the API data and fix
        for audio_fmt in traverse_obj(dms_fmts, lambda _, v: v['vcodec'] == 'none'):
                **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
                    'format_id': ('id', {str}),
                    'abr': ('bitRate', {float_or_none(scale=1000)}),
                    'asr': ('samplingRate', {int_or_none}),

        # Sort before removing dupes to keep the format dicts with the lowest tbr
        video_fmts = sorted((fmt for fmt in dms_fmts if fmt['vcodec'] != 'none'), key=lambda f: f['tbr'])
        self._remove_duplicate_formats(video_fmts)
        # Calculate the true vbr/tbr by subtracting the lowest abr
        min_abr = min(traverse_obj(audios, (..., 'bitRate', {float_or_none})), default=0) / 1000
        for video_fmt in video_fmts:
            video_fmt['tbr'] -= min_abr
            video_fmt['format_id'] = f'video-{video_fmt["tbr"]:.0f}'
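
    # Overall extraction flow: parse the 'server-response' meta tag from the watch page,
    # fall back to the watch v3 API on failure, check availability, then collect both
    # DMC and DMS formats.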
    def _real_extract(self, url):
        video_id = self._match_id(url)

            webpage, handle = self._download_webpage_handle(
                'https://www.nicovideo.jp/watch/' + video_id, video_id)
            if video_id.startswith('so'):
                video_id = self._match_id(handle.url)

            api_data = traverse_obj(
                self._parse_json(self._html_search_meta('server-response', webpage) or '', video_id),
                ('data', 'response', {dict}))
                raise ExtractorError('Server response data not found')
        except ExtractorError as e:
                api_data = self._download_json(
                    f'https://www.nicovideo.jp/api/watch/v3/{video_id}?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_{round(time.time() * 1000)}', video_id,
                    note='Downloading API JSON', errnote='Unable to fetch data')['data']
            except ExtractorError:
                if not isinstance(e.cause, HTTPError):
                webpage = e.cause.response.read().decode('utf-8', 'replace')
                error_msg = self._html_search_regex(
                    r'(?s)<section\s+class="(?:(?:ErrorMessage|WatchExceptionPage-message)\s*)+">(.+?)</section>',
                    webpage, 'error reason', default=None)
                raise ExtractorError(clean_html(error_msg), expected=True)
        availability = self._availability(**(traverse_obj(api_data, ('payment', 'video', {
            'needs_premium': ('isPremium', {bool}),
            'needs_subscription': ('isAdmission', {bool}),
        })) or {'needs_auth': True}))
        formats = [*self._yield_dmc_formats(api_data, video_id),
                   *self._yield_dms_formats(api_data, video_id)]

            fail_msg = clean_html(self._html_search_regex(
                r'<p[^>]+\bclass="fail-message"[^>]*>(?P<msg>.+?)</p>',
                webpage, 'fail message', default=None, group='msg'))
                self.to_screen(f'Niconico said: {fail_msg}')
            if fail_msg and 'された地域と同じ地域からのみ視聴できます。' in fail_msg:
                self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
            elif availability == 'premium_only':
                self.raise_login_required('This video requires premium', metadata_available=True)
            elif availability == 'subscriber_only':
                self.raise_login_required('This video is for members only', metadata_available=True)
            elif availability == 'needs_auth':
                self.raise_login_required(metadata_available=False)

        # Start extracting information
            # use og:video:tag (not logged in)
            og_video_tags = re.finditer(r'<meta\s+property="og:video:tag"\s*content="(.*?)">', webpage)
            tags = list(filter(None, (clean_html(x.group(1)) for x in og_video_tags)))

                # use keywords and split with comma (not logged in)
                kwds = self._html_search_meta('keywords', webpage, default=None)
                    tags = [x for x in kwds.split(',') if x]

            # find in json (logged in)
            tags = traverse_obj(api_data, ('tag', 'items', ..., 'name'))

        thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp'])

        def get_video_info(*items, get_first=True, **kwargs):
            return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)
            '_api_data': api_data,
            'title': get_video_info(('originalTitle', 'title')) or self._og_search_title(webpage, default=None),
            'availability': availability,
                'preference': thumb_prefs(key),
                **parse_resolution(url, lenient=True),
            } for key, url in (get_video_info('thumbnail') or {}).items() if url],
            'description': clean_html(get_video_info('description')),
            'uploader': traverse_obj(api_data, ('owner', 'nickname'), ('channel', 'name'), ('community', 'name')),
            'uploader_id': str_or_none(traverse_obj(api_data, ('owner', 'id'), ('channel', 'id'), ('community', 'id'))),
            'timestamp': parse_iso8601(get_video_info('registeredAt')) or parse_iso8601(
                self._html_search_meta('video:release_date', webpage, 'date published', default=None)),
            'channel': traverse_obj(api_data, ('channel', 'name'), ('community', 'name')),
            'channel_id': traverse_obj(api_data, ('channel', 'id'), ('community', 'id')),
            'view_count': int_or_none(get_video_info('count', 'view')),
            'genre': traverse_obj(api_data, ('genre', 'label'), ('genre', 'key')),
            'comment_count': get_video_info('count', 'comment', expected_type=int),
                parse_duration(self._html_search_meta('video:duration', webpage, 'video duration', default=None))
                or get_video_info('duration')),
            'webpage_url': url_or_none(url) or f'https://www.nicovideo.jp/watch/{video_id}',
            'subtitles': self.extract_subtitles(video_id, api_data),
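
    # Comments (danmaku) are fetched from the nvComment server and exposed as a subtitle
    # track whose payload is the raw comment JSON.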
    def _get_subtitles(self, video_id, api_data):
        comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {}
        if not comments_info.get('server'):

        danmaku = traverse_obj(self._download_json(
            f'{comments_info["server"]}/v1/threads', video_id, data=json.dumps({
                'params': comments_info.get('params'),
                'threadKey': comments_info.get('threadKey'),
            }).encode(), fatal=False,
                'Referer': 'https://www.nicovideo.jp/',
                'Origin': 'https://www.nicovideo.jp',
                'Content-Type': 'text/plain;charset=UTF-8',
                'x-client-os-type': 'others',
                'x-frontend-id': '6',
                'x-frontend-version': '0',
            note='Downloading comments', errnote='Failed to download comments'),
            ('data', 'threads', ..., 'comments', ...))

                'data': json.dumps(danmaku),
class NiconicoPlaylistBaseIE(InfoExtractor):
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0',
        'X-Niconico-Language': 'en-us',
    def _call_api(self, list_id, resource, query):
        raise NotImplementedError('Must be implemented in subclasses')

    def _parse_owner(item):
            'uploader': traverse_obj(item, ('owner', 'name')),
            'uploader_id': traverse_obj(item, ('owner', 'id')),

    def _fetch_page(self, list_id, page):
        resp = self._call_api(list_id, f'page {page}', {
            'pageSize': self._PAGE_SIZE,
        # this is needed to support both mylist and user
        for video in traverse_obj(resp, ('items', ..., ('video', None))) or []:
            video_id = video.get('id')
                # skip {"video": {"id": "blablabla", ...}}
            count = video.get('count') or {}
            get_count = lambda x: int_or_none(count.get(x))
                'title': video.get('title'),
                'url': f'https://www.nicovideo.jp/watch/{video_id}',
                'description': video.get('shortDescription'),
                'duration': int_or_none(video.get('duration')),
                'view_count': get_count('view'),
                'comment_count': get_count('comment'),
                'thumbnail': traverse_obj(video, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))),
                'ie_key': NiconicoIE.ie_key(),
                **self._parse_owner(video),

    def _entries(self, list_id):
        return OnDemandPagedList(functools.partial(self._fetch_page, list_id), self._PAGE_SIZE)
class NiconicoPlaylistIE(NiconicoPlaylistBaseIE):
    IE_NAME = 'niconico:playlist'
    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)'

        'url': 'http://www.nicovideo.jp/mylist/27411728',
            'title': 'AKB48のオールナイトニッポン',
            'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
            'uploader_id': '805442',
        'playlist_mincount': 291,
        'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
        'only_matching': True,
        'url': 'https://www.nicovideo.jp/my/mylist/#/68048635',
        'only_matching': True,
    def _call_api(self, list_id, resource, query):
        return self._download_json(
            f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id,
            f'Downloading {resource}', query=query,
            headers=self._API_HEADERS)['data']['mylist']

    def _real_extract(self, url):
        list_id = self._match_id(url)
        mylist = self._call_api(list_id, 'list', {
        return self.playlist_result(
            self._entries(list_id), list_id,
            mylist.get('name'), mylist.get('description'), **self._parse_owner(mylist))
class NiconicoSeriesIE(InfoExtractor):
    IE_NAME = 'niconico:series'
    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp(?:/user/\d+)?|nico\.ms)/series/(?P<id>\d+)'

        'url': 'https://www.nicovideo.jp/user/44113208/series/110226',
            'title': 'ご立派ァ!のシリーズ',
        'playlist_mincount': 10,
        'url': 'https://www.nicovideo.jp/series/12312/',
            'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
        'playlist_mincount': 103,
        'url': 'https://nico.ms/series/203559',
        'only_matching': True,
    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

        title = self._search_regex(
            r'<div class="TwitterShareButton"\s+data-text="(.+)\s+https:'),
            webpage, 'title', fatal=False)
            title = unescapeHTML(title)
        json_data = next(self._yield_json_ld(webpage, None, fatal=False))
        return self.playlist_from_matches(
            traverse_obj(json_data, ('itemListElement', ..., 'url')), list_id, title, ie=NiconicoIE)
class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
    IE_NAME = 'niconico:history'
    IE_DESC = 'NicoNico user history or likes. Requires cookies.'
    _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)'

        'note': 'PC page, with /video',
        'url': 'https://www.nicovideo.jp/my/history/video',
        'only_matching': True,
        'note': 'PC page, without /video',
        'url': 'https://www.nicovideo.jp/my/history',
        'only_matching': True,
        'note': 'mobile page, with /video',
        'url': 'https://sp.nicovideo.jp/my/history/video',
        'only_matching': True,
        'note': 'mobile page, without /video',
        'url': 'https://sp.nicovideo.jp/my/history',
        'only_matching': True,
        'url': 'https://www.nicovideo.jp/my/history/like',
        'only_matching': True,
        'note': 'Mobile page',
        'url': 'https://sp.nicovideo.jp/my/history/like',
        'only_matching': True,
    def _call_api(self, list_id, resource, query):
        path = 'likes' if list_id == 'history/like' else 'watch/history'
        return self._download_json(
            f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id,
            f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']

    def _real_extract(self, url):
        list_id = self._match_id(url)
            mylist = self._call_api(list_id, 'list', {'pageSize': 1})
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                self.raise_login_required('You have to be logged in to get your history')
        return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
class NicovideoSearchBaseIE(InfoExtractor):
    _SEARCH_TYPE = 'search'

    def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'):
        pages = [query['page']] if 'page' in query else itertools.count(1)
        for page_num in pages:
            query['page'] = str(page_num)
            webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
            results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.*?)(?=["\'])', webpage)
                yield self.url_result(f'https://www.nicovideo.jp/watch/{item}', 'Niconico', item)

    def _search_results(self, query):
        return self._entries(
            self._proto_relative_url(f'//www.nicovideo.jp/{self._SEARCH_TYPE}/{query}'), query)


class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    IE_DESC = 'Nico video search'
    IE_NAME = 'nicovideo:search'
    _SEARCH_KEY = 'nicosearch'
class NicovideoSearchURLIE(NicovideoSearchBaseIE):
    IE_NAME = f'{NicovideoSearchIE.IE_NAME}_url'
    IE_DESC = 'Nico video search URLs'
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?'

        'url': 'http://www.nicovideo.jp/search/sm9',
        'playlist_mincount': 40,
        'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
        'playlist_count': 31,

    def _real_extract(self, url):
        query = self._match_id(url)
        return self.playlist_result(self._entries(url, query), query, query)
class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    IE_DESC = 'Nico video search, newest first'
    IE_NAME = f'{NicovideoSearchIE.IE_NAME}:date'
    _SEARCH_KEY = 'nicosearchdate'
        'url': 'nicosearchdateall:a',
        'playlist_mincount': 1610,

    _START_DATE = dt.date(2007, 1, 1)
    _RESULTS_PER_PAGE = 32
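
    # The site caps how many pages a single search query returns, so _entries() below
    # recursively bisects the date range whenever the last page of a query is still full.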
    def _entries(self, url, item_id, start_date=None, end_date=None):
        start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()

        # If the last page has a full page of videos, we need to break down the query interval further
        last_page_len = len(list(self._get_entries_for_date(
            url, item_id, start_date, end_date, self._MAX_PAGES,
            note=f'Checking number of videos from {start_date} to {end_date}')))
        if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date):
            midpoint = start_date + ((end_date - start_date) // 2)
            yield from self._entries(url, item_id, midpoint, end_date)
            yield from self._entries(url, item_id, start_date, midpoint)
            self.to_screen(f'{item_id}: Downloading results from {start_date} to {end_date}')
            yield from self._get_entries_for_date(
                url, item_id, start_date, end_date, note=' Downloading page %(page)s')

    def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None):
            'start': str(start_date),
            'end': str(end_date or start_date),
            query['page'] = str(page_num)

        yield from super()._entries(url, item_id, query=query, note=note)
class NicovideoTagURLIE(NicovideoSearchBaseIE):
    IE_NAME = 'niconico:tag'
    IE_DESC = 'NicoNico video tag URLs'
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/tag/(?P<id>[^?#&]+)?'
        'url': 'https://www.nicovideo.jp/tag/ドキュメンタリー淫夢',
            'title': 'ドキュメンタリー淫夢',
        'playlist_mincount': 400,

    def _real_extract(self, url):
        query = self._match_id(url)
        return self.playlist_result(self._entries(url, query), query, query)
class NiconicoUserIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)(?:/video)?/?(?:$|[#?])'
        'url': 'https://www.nicovideo.jp/user/419948',
        'playlist_mincount': 101,
    _API_URL = 'https://nvapi.nicovideo.jp/v2/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s'
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0',

    def _entries(self, list_id):
        while count < total_count:
            json_parsed = self._download_json(
                self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
                headers=self._API_HEADERS,
                note='Downloading JSON metadata%s' % (f' page {page_num}' if page_num else ''))
                total_count = int_or_none(json_parsed['data'].get('totalCount'))
            for entry in json_parsed['data']['items']:
                yield self.url_result(
                    f'https://www.nicovideo.jp/watch/{entry["essential"]["id"]}', ie=NiconicoIE)

    def _real_extract(self, url):
        list_id = self._match_id(url)
        return self.playlist_result(self._entries(list_id), list_id)
class NiconicoLiveIE(InfoExtractor):
    IE_NAME = 'niconico:live'
    _VALID_URL = r'https?://(?:sp\.)?live2?\.nicovideo\.jp/(?:watch|gate)/(?P<id>lv\d+)'
        'note': 'this test case includes invisible characters for title, pasting them as-is',
        'url': 'https://live.nicovideo.jp/watch/lv339533123',
            'title': '激辛ペヤング食べます\u202a( ;ᯅ; )\u202c(歌枠オーディション参加中)',
            'comment_count': 1772,
            'description': '初めましてもかって言います❕\nのんびり自由に適当に暮らしてます',
            'channel': 'ゲストさんのコミュニティ',
            'channel_id': 'co5776900',
            'channel_url': 'https://com.nicovideo.jp/community/co5776900',
            'timestamp': 1670677328,
        'skip': 'livestream',
        'url': 'https://live2.nicovideo.jp/watch/lv339533123',
        'only_matching': True,
        'url': 'https://sp.live.nicovideo.jp/watch/lv339533123',
        'only_matching': True,
        'url': 'https://sp.live2.nicovideo.jp/watch/lv339533123',
        'only_matching': True,

    _KNOWN_LATENCY = ('high', 'low')
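
    # The WebSocket handshake below sends a 'startWatching' message and then waits for a
    # 'stream' message carrying the HLS manifest URI and the list of available qualities.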
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(f'https://live.nicovideo.jp/watch/{video_id}', video_id)

        embedded_data = self._parse_json(unescapeHTML(self._search_regex(
            r'<script\s+id="embedded-data"\s*data-props="(.+?)"', webpage, 'embedded data')), video_id)

        ws_url = traverse_obj(embedded_data, ('site', 'relive', 'webSocketUrl'))
            raise ExtractorError('The live hasn\'t started yet or already ended.', expected=True)
        ws_url = update_url_query(ws_url, {
            'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',

        hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
        latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
        if latency not in self._KNOWN_LATENCY:

        ws = self._request_webpage(
            Request(ws_url, headers={'Origin': f'https://{hostname}'}),
            video_id=video_id, note='Connecting to WebSocket server')
        self.write_debug('Sending HLS server request')
            'type': 'startWatching',
                    'protocol': 'hls+fmp4',
                'protocol': 'webSocket',

            data = json.loads(recv)
            if not isinstance(data, dict):
            if data.get('type') == 'stream':
                m3u8_url = data['data']['uri']
                qualities = data['data']['availableQualities']
            elif data.get('type') == 'disconnect':
                self.write_debug(recv)
                raise ExtractorError('Disconnected at middle of extraction')
            elif data.get('type') == 'error':
                self.write_debug(recv)
                message = traverse_obj(data, ('body', 'code')) or recv
                raise ExtractorError(message)
            elif self.get_param('verbose', False):
                    recv = recv[:100] + '...'
                self.write_debug(f'Server said: {recv}')
        title = traverse_obj(embedded_data, ('program', 'title')) or self._html_search_meta(
            ('og:title', 'twitter:title'), webpage, 'live title', fatal=False)

        raw_thumbs = traverse_obj(embedded_data, ('program', 'thumbnail')) or {}
        for name, value in raw_thumbs.items():
            if not isinstance(value, dict):
                    **parse_resolution(value, lenient=True),

            for k, img_url in value.items():
                res = parse_resolution(k, lenient=True) or parse_resolution(img_url, lenient=True)
                width, height = res.get('width'), res.get('height')
                    'id': f'{name}_{width}x{height}',

        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True)
        for fmt, q in zip(formats, reversed(qualities[1:])):
                'protocol': 'niconico_live',
                'video_id': video_id,
                'live_latency': latency,

            **traverse_obj(embedded_data, {
                'view_count': ('program', 'statistics', 'watchCount'),
                'comment_count': ('program', 'statistics', 'commentCount'),
                'uploader': ('program', 'supplier', 'name'),
                'channel': ('socialGroup', 'name'),
                'channel_id': ('socialGroup', 'id'),
                'channel_url': ('socialGroup', 'socialGroupPageUrl'),
            'description': clean_html(traverse_obj(embedded_data, ('program', 'description'))),
            'timestamp': int_or_none(traverse_obj(embedded_data, ('program', 'openTime'))),
            'thumbnails': thumbnails,