# git blob-view residue (not part of the module source):
# [ie/wistia] Support password-protected videos (#11100)
# [yt-dlp3.git] / yt_dlp / extractor / theplatform.py
# blob 7c1769c2dfd0def159c69ce79e4da8e3f1bc6a7f
import hashlib
import hmac
import re
import time

from .adobepass import AdobePassIE
from .once import OnceIE
from ..networking import HEADRequest, Request
from ..utils import (
    ExtractorError,
    determine_ext,
    find_xpath_attr,
    float_or_none,
    int_or_none,
    mimetype2ext,
    parse_qs,
    traverse_obj,
    unsmuggle_url,
    update_url,
    update_url_query,
    urlhandle_detect_ext,
    xpath_with_ns,
)
# Default namespace used by thePlatform's SMIL 2.1 manifests
default_ns = 'http://www.w3.org/2005/SMIL21/Language'


def _x(p):
    """Qualify XPath expression *p* with the SMIL 2.1 namespace prefix."""
    # def instead of an assigned lambda (PEP 8 E731)
    return xpath_with_ns(p, {'smil': default_ns})
class ThePlatformBaseIE(OnceIE):
    """Shared helpers for extractors built on thePlatform (MPX) SMIL feeds."""
    # top-level domain of the link.theplatform host; subclasses may override
    _TP_TLD = 'com'

    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        """Download a SMIL manifest and return ``(formats, subtitles)``.

        Raises geo-restriction or a generic ExtractorError when the SMIL
        contains an error <ref> element instead of playable media.
        """
        meta = self._download_xml(
            smil_url, video_id, note=note, query={'format': 'SMIL'},
            headers=self.geo_verification_headers())
        error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
        if error_element is not None:
            exception = find_xpath_attr(
                error_element, _x('.//smil:param'), 'name', 'exception')
            if exception is not None:
                if exception.get('value') == 'GeoLocationBlocked':
                    self.raise_geo_restricted(error_element.attrib['abstract'])
                elif error_element.attrib['src'].startswith(
                        f'http://link.theplatform.{self._TP_TLD}/s/errorFiles/Unavailable.'):
                    raise ExtractorError(
                        error_element.attrib['abstract'], expected=True)

        smil_formats, subtitles = self._parse_smil_formats_and_subtitles(
            meta, smil_url, video_id, namespace=default_ns,
            # the parameters are from syfy.com, other sites may use others,
            # they also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))

        formats = []
        for _format in smil_formats:
            if OnceIE.suitable(_format['url']):
                formats.extend(self._extract_once_formats(_format['url']))
            else:
                media_url = _format['url']
                if determine_ext(media_url) == 'm3u8':
                    # NOTE(review): the hdnea2 cookie appears to be passed back
                    # as an hdnea3 query parameter for HLS auth — confirm
                    hdnea2 = self._get_cookies(media_url).get('hdnea2')
                    if hdnea2:
                        _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value})

                formats.append(_format)

        return formats, subtitles

    def _download_theplatform_metadata(self, path, video_id):
        """Fetch the JSON "preview" metadata document for a media *path*."""
        info_url = f'http://link.theplatform.{self._TP_TLD}/s/{path}?format=preview'
        return self._download_json(info_url, video_id)

    def _parse_theplatform_metadata(self, info):
        """Map a thePlatform preview metadata dict *info* to an info dict."""
        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles.setdefault(lang, []).append({
                    'ext': mimetype2ext(mime),
                    'url': src,
                })

        duration = info.get('duration')
        tp_chapters = info.get('chapters', [])
        chapters = []
        if tp_chapters:
            def _add_chapter(start_time, end_time):
                # times are given in milliseconds; skip chapters missing either bound
                start_time = float_or_none(start_time, 1000)
                end_time = float_or_none(end_time, 1000)
                if start_time is None or end_time is None:
                    return
                chapters.append({
                    'start_time': start_time,
                    'end_time': end_time,
                })

            for chapter in tp_chapters[:-1]:
                _add_chapter(chapter.get('startTime'), chapter.get('endTime'))
            # the final chapter may lack an endTime; fall back to total duration
            _add_chapter(tp_chapters[-1].get('startTime'), tp_chapters[-1].get('endTime') or duration)

        def extract_site_specific_field(field):
            # A number of sites have custom-prefixed keys, e.g. 'cbc$seasonNumber'
            return traverse_obj(info, lambda k, v: v and k.endswith(f'${field}'), get_all=False)

        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            'duration': float_or_none(duration, 1000),
            'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
            'uploader': info.get('billingCode'),
            'chapters': chapters,
            'creator': traverse_obj(info, ('author', {str})) or None,
            'categories': traverse_obj(info, (
                'categories', lambda _, v: v.get('label') in ('category', None), 'name', {str})) or None,
            'tags': traverse_obj(info, ('keywords', {lambda x: re.split(r'[;,]\s?', x) if x else None})),
            'location': extract_site_specific_field('region'),
            'series': extract_site_specific_field('show'),
            'season_number': int_or_none(extract_site_specific_field('seasonNumber')),
            'media_type': extract_site_specific_field('programmingType') or extract_site_specific_field('type'),
        }

    def _extract_theplatform_metadata(self, path, video_id):
        """Download and parse preview metadata for *path* in one step."""
        info = self._download_theplatform_metadata(path, video_id)
        return self._parse_theplatform_metadata(info)
class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
    """Extractor for link/player.theplatform.com URLs and theplatform: IDs."""
    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''
    _EMBED_REGEX = [
        r'''(?x)
            <meta\s+
                property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+
                content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2''',
        r'(?s)<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//player\.theplatform\.com/p/.+?)\1',
    ]

    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'ext': 'flv',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'duration': 247,
            'timestamp': 1383239700,
            'upload_date': '20131031',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'ext': 'flv',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
            'timestamp': 1426176191,
            'upload_date': '20150312',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
        'skip': 'CNet no longer uses ThePlatform',
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'ext': 'mp4',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
            'uploader': 'EGSM',
        },
        'skip': 'Dead link',
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': 'fb96bb3d85118930a5b055783a3bd992',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'ext': 'mp4',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'duration': 83.0,
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'uploader': 'NBCU-NEWS',
        },
        'skip': 'Error: Player PID "nbcNewsOffsite" is disabled',
    }, {
        # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
        # geo-restricted (US), HLS encrypted with AES-128
        'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
        'only_matching': True,
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # Are whitespaces ignored in URLs?
        # https://github.com/ytdl-org/youtube-dl/issues/12044
        for embed_url in super()._extract_embed_urls(url, webpage):
            yield re.sub(r'\s', '', embed_url)

    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        """Append thePlatform URL signature, valid for *life* seconds.

        The signature is flags + hex expiry + HMAC-SHA1 checksum + hex secret.
        """
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)

        def str_to_hex(str_data):
            return str_data.encode('ascii').hex()

        relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1)
        clear_text = bytes.fromhex(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return f'{url}&sig={sig}'

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        self._initialize_geo_bypass({
            'countries': smuggled_data.get('geo_countries'),
        })

        mobj = self._match_valid_url(url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')

        if not provider_id:
            provider_id = 'dJ5BDC'

        path = provider_id + '/'
        if mobj.group('media'):
            path += mobj.group('media')
        path += video_id

        qs_dict = parse_qs(url)
        if 'guid' in qs_dict:
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # feed id usually locates in the last script.
            # Seems there's no pattern for the interested script filename, so
            # I try one by one
            for script in reversed(scripts):
                feed_script = self._download_webpage(
                    self._proto_relative_url(script, 'http:'),
                    video_id, 'Downloading feed script')
                feed_id = self._search_regex(
                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
                    'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/{}/{}?byGuid={}'.format(
                provider_id, feed_id, qs_dict['guid'][0]))

        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        # Explicitly specified SMIL (see https://github.com/ytdl-org/youtube-dl/issues/7385)
        elif '/guid/' in url:
            headers = {}
            source_url = smuggled_data.get('source_url')
            if source_url:
                headers['Referer'] = source_url
            request = Request(url, headers=headers)
            webpage = self._download_webpage(request, video_id)
            smil_url = self._search_regex(
                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
                webpage, 'smil url', group='url')
            path = self._search_regex(
                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
            # The conditional expression must be parenthesized: the previous
            # form ("+= '?' if ... else '&' + 'formats=m3u,mpeg4'") appended
            # only a bare '?' when the URL had no query string, dropping the
            # formats parameter entirely.
            smil_url += ('?' if '?' not in smil_url else '&') + 'formats=m3u,mpeg4'
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            release_url = config.get('releaseUrl') or f'http://link.theplatform.com/s/{path}?mbr=true'
            smil_url = release_url + '&formats=MPEG4&manifest=f4m'
        else:
            smil_url = f'http://link.theplatform.com/s/{path}?mbr=true'

        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)

        # With some sites, manifest URL must be forced to extract HLS formats
        if not traverse_obj(formats, lambda _, v: v['format_id'].startswith('hls')):
            m3u8_url = update_url(url, query='mbr=true&manifest=m3u', fragment=None)
            urlh = self._request_webpage(
                HEADRequest(m3u8_url), video_id, 'Checking for HLS formats', 'No HLS formats found', fatal=False)
            if urlh and urlhandle_detect_ext(urlh) == 'm3u8':
                m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
                    m3u8_url, video_id, m3u8_id='hls', fatal=False)
                formats.extend(m3u8_fmts)
                self._merge_subtitles(m3u8_subs, target=subtitles)

        ret = self._extract_theplatform_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })

        return ret
class ThePlatformFeedIE(ThePlatformBaseIE):
    """Extractor for feed.theplatform.com JSON feed URLs (byGuid/byId)."""
    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))'
    _TESTS = [{
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '6e32495b5073ab414471b615c5ded394',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'ext': 'mp4',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'duration': 467.0,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
            'uploader': 'NBCU-NEWS',
        },
    }, {
        'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01',
        'only_matching': True,
    }]

    def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query=None, account_id=None):
        """Download a feed entry and return its info dict.

        *custom_fields* is an optional callable mapping the raw feed entry to
        extra info-dict fields; *asset_types_query* maps an asset type to
        extra SMIL query parameters; *account_id* forces a guid-based SMIL URL.
        """
        # None sentinel instead of a mutable {} default argument
        asset_types_query = asset_types_query or {}
        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
        entry = self._download_json(real_url, video_id)['entries'][0]
        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl')

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        asset_types = []
        for item in entry['media$content']:
            smil_url = item['plfile$url']
            cur_video_id = ThePlatformIE._match_id(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            file_asset_types = item.get('plfile$assetTypes') or parse_qs(smil_url)['assetTypes']
            for asset_type in file_asset_types:
                # each asset type is downloaded only once across all items
                if asset_type in asset_types:
                    continue
                asset_types.append(asset_type)
                query = {
                    'mbr': 'true',
                    'formats': item['plfile$format'],
                    'assetTypes': asset_type,
                }
                if asset_type in asset_types_query:
                    query.update(asset_types_query[asset_type])
                cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
                    main_smil_url or smil_url, query), video_id, f'Downloading SMIL data for {asset_type}')
                formats.extend(cur_formats)
                subtitles = self._merge_subtitles(subtitles, cur_subtitles)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self._extract_theplatform_metadata(f'{provider_id}/{first_video_id}', video_id)
        subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })
        if custom_fields:
            ret.update(custom_fields(entry))

        return ret

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')
        filter_query = mobj.group('filter')

        return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)