7 from .common
import InfoExtractor
8 from ..networking
.exceptions
import HTTPError
16 get_element_by_attribute
,
# Alphabet used to encode/decode Instagram shortcodes: URL-safe base64 ordering
# (A-Z, a-z, 0-9, '-', '_'); shared by _pk_to_id and _id_to_pk below.
_ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
def _pk_to_id(media_id):
    """Convert a numeric media id ("pk") to an Instagram shortcode.

    Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id

    media_id may be an int or a str of the form '<pk>' or '<pk>_<user id>'.
    """
    # Coerce to str first: callers pass the raw API 'pk' value
    # (e.g. _pk_to_id(product_media.get('pk'))), which may be an int,
    # and int has no .split()
    return encode_base_n(int(str(media_id).split('_')[0]), table=_ENCODING_CHARS)
def _id_to_pk(shortcode):
    """Convert a shortcode to a numeric media id ("pk").

    Only the first 11 characters are significant; inverse of _pk_to_id.
    """
    return decode_base_n(shortcode[:11], table=_ENCODING_CHARS)
39 class InstagramBaseIE(InfoExtractor
):
40 _NETRC_MACHINE
= 'instagram'
43 _API_BASE_URL
= 'https://i.instagram.com/api/v1'
44 _LOGIN_URL
= 'https://www.instagram.com/accounts/login'
46 'X-IG-App-ID': '936619743392459',
47 'X-ASBD-ID': '198387',
48 'X-IG-WWW-Claim': '0',
49 'Origin': 'https://www.instagram.com',
51 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
54 def _perform_login(self
, username
, password
):
55 if self
._IS
_LOGGED
_IN
:
58 login_webpage
= self
._download
_webpage
(
59 self
._LOGIN
_URL
, None, note
='Downloading login webpage', errnote
='Failed to download login webpage')
61 shared_data
= self
._parse
_json
(self
._search
_regex
(
62 r
'window\._sharedData\s*=\s*({.+?});', login_webpage
, 'shared data', default
='{}'), None)
64 login
= self
._download
_json
(
65 f
'{self._LOGIN_URL}/ajax/', None, note
='Logging in', headers
={
67 'X-Requested-With': 'XMLHttpRequest',
68 'X-CSRFToken': shared_data
['config']['csrf_token'],
69 'X-Instagram-AJAX': shared_data
['rollout_hash'],
70 'Referer': 'https://www.instagram.com/',
71 }, data
=urlencode_postdata({
72 'enc_password': f
'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
75 'optIntoOneTap': 'false',
76 'stopDeletionNonce': '',
77 'trustedDeviceRecords': '{}',
80 if not login
.get('authenticated'):
81 if login
.get('message'):
82 raise ExtractorError(f
'Unable to login: {login["message"]}')
83 elif login
.get('user'):
84 raise ExtractorError('Unable to login: Sorry, your password was incorrect. Please double-check your password.', expected
=True)
85 elif login
.get('user') is False:
86 raise ExtractorError('Unable to login: The username you entered doesn\'t belong to an account. Please check your username and try again.', expected
=True)
87 raise ExtractorError('Unable to login')
88 InstagramBaseIE
._IS
_LOGGED
_IN
= True
def _get_count(self, media, kind, *keys):
    # Look up media[kind]['count'] first, then fall back to each
    # media['edge_media_<key>']['count'] in the order given.
    paths = [(kind, 'count')]
    paths.extend((f'edge_media_{key}', 'count') for key in keys)
    return traverse_obj(media, *paths, expected_type=int_or_none)
def _get_dimension(self, name, media, webpage=None):
    # Prefer the dimension reported by the API payload; fall back to the
    # og:video:/video: meta tags of the webpage (if one was supplied).
    dimension = traverse_obj(media, ('dimensions', name), expected_type=int_or_none)
    if dimension:
        return dimension
    return int_or_none(self._html_search_meta(
        (f'og:video:{name}', f'video:{name}'), webpage or '', default=None))
101 def _extract_nodes(self
, nodes
, is_direct
=False):
102 for idx
, node
in enumerate(nodes
, start
=1):
103 if node
.get('__typename') != 'GraphVideo' and node
.get('is_video') is not True:
106 video_id
= node
.get('shortcode')
110 'id': video_id
or node
['id'],
111 'url': node
.get('video_url'),
112 'width': self
._get
_dimension
('width', node
),
113 'height': self
._get
_dimension
('height', node
),
115 'Referer': 'https://www.instagram.com/',
123 'ie_key': 'Instagram',
125 'url': f
'https://instagram.com/p/{video_id}',
130 'title': node
.get('title') or (f
'Video {idx}' if is_direct
else None),
131 'description': traverse_obj(
132 node
, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type
=str),
133 'thumbnail': traverse_obj(
134 node
, 'display_url', 'thumbnail_src', 'display_src', expected_type
=url_or_none
),
135 'duration': float_or_none(node
.get('video_duration')),
136 'timestamp': int_or_none(node
.get('taken_at_timestamp')),
137 'view_count': int_or_none(node
.get('video_view_count')),
138 'comment_count': self
._get
_count
(node
, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
139 'like_count': self
._get
_count
(node
, 'likes', 'preview_like'),
142 def _extract_product_media(self
, product_media
):
143 media_id
= product_media
.get('code') or _pk_to_id(product_media
.get('pk'))
144 vcodec
= product_media
.get('video_codec')
145 dash_manifest_raw
= product_media
.get('video_dash_manifest')
146 videos_list
= product_media
.get('video_versions')
147 if not (dash_manifest_raw
or videos_list
):
151 'format_id': fmt
.get('id'),
152 'url': fmt
.get('url'),
153 'width': fmt
.get('width'),
154 'height': fmt
.get('height'),
156 } for fmt
in videos_list
or []]
157 if dash_manifest_raw
:
158 formats
.extend(self
._parse
_mpd
_formats
(self
._parse
_xml
(dash_manifest_raw
, media_id
), mpd_id
='dash'))
161 'url': thumbnail
.get('url'),
162 'width': thumbnail
.get('width'),
163 'height': thumbnail
.get('height'),
164 } for thumbnail
in traverse_obj(product_media
, ('image_versions2', 'candidates')) or []]
167 'duration': float_or_none(product_media
.get('video_duration')),
169 'thumbnails': thumbnails
,
172 def _extract_product(self
, product_info
):
173 if isinstance(product_info
, list):
174 product_info
= product_info
[0]
176 user_info
= product_info
.get('user') or {}
178 'id': _pk_to_id(traverse_obj(product_info
, 'pk', 'id', expected_type
=str_or_none
)[:19]),
179 'title': product_info
.get('title') or f
'Video by {user_info.get("username")}',
180 'description': traverse_obj(product_info
, ('caption', 'text'), expected_type
=str_or_none
),
181 'timestamp': int_or_none(product_info
.get('taken_at')),
182 'channel': user_info
.get('username'),
183 'uploader': user_info
.get('full_name'),
184 'uploader_id': str_or_none(user_info
.get('pk')),
185 'view_count': int_or_none(product_info
.get('view_count')),
186 'like_count': int_or_none(product_info
.get('like_count')),
187 'comment_count': int_or_none(product_info
.get('comment_count')),
188 '__post_extractor': self
.extract_comments(_pk_to_id(product_info
.get('pk'))),
190 'Referer': 'https://www.instagram.com/',
193 carousel_media
= product_info
.get('carousel_media')
198 'title': f
'Post by {user_info.get("username")}',
201 **self
._extract
_product
_media
(product_media
),
202 } for product_media
in carousel_media
],
207 **self
._extract
_product
_media
(product_info
),
210 def _get_comments(self
, video_id
):
211 comments_info
= self
._download
_json
(
212 f
'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/comments/?can_support_threading=true&permalink_enabled=false', video_id
,
213 fatal
=False, errnote
='Comments extraction failed', note
='Downloading comments info', headers
=self
._API
_HEADERS
) or {}
215 comment_data
= traverse_obj(comments_info
, ('edge_media_to_parent_comment', 'edges'), 'comments')
216 for comment_dict
in comment_data
or []:
218 'author': traverse_obj(comment_dict
, ('node', 'owner', 'username'), ('user', 'username')),
219 'author_id': traverse_obj(comment_dict
, ('node', 'owner', 'id'), ('user', 'pk')),
220 'author_thumbnail': traverse_obj(comment_dict
, ('node', 'owner', 'profile_pic_url'), ('user', 'profile_pic_url'), expected_type
=url_or_none
),
221 'id': traverse_obj(comment_dict
, ('node', 'id'), 'pk'),
222 'text': traverse_obj(comment_dict
, ('node', 'text'), 'text'),
223 'like_count': traverse_obj(comment_dict
, ('node', 'edge_liked_by', 'count'), 'comment_like_count', expected_type
=int_or_none
),
224 'timestamp': traverse_obj(comment_dict
, ('node', 'created_at'), 'created_at', expected_type
=int_or_none
),
228 class InstagramIOSIE(InfoExtractor
):
229 IE_DESC
= 'IOS instagram:// URL'
230 _VALID_URL
= r
'instagram://media\?id=(?P<id>[\d_]+)'
232 'url': 'instagram://media?id=482584233761418119',
233 'md5': '0d2da106a9d2631273e192b372806516',
237 'title': 'Video by naomipq',
238 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
239 'thumbnail': r
're:^https?://.*\.jpg',
241 'timestamp': 1371748545,
242 'upload_date': '20130620',
243 'uploader_id': 'naomipq',
244 'uploader': 'B E A U T Y F O R A S H E S',
246 'comment_count': int,
249 'add_ie': ['Instagram'],
def _real_extract(self, url):
    # instagram:// URLs carry the numeric pk; translate it to a shortcode
    # and hand off to the main Instagram extractor.
    shortcode = _pk_to_id(self._match_id(url))
    return self.url_result(f'http://instagram.com/tv/{shortcode}', InstagramIE, shortcode)
257 class InstagramIE(InstagramBaseIE
):
258 _VALID_URL
= r
'(?P<url>https?://(?:www\.)?instagram\.com(?:/[^/]+)?/(?:p|tv|reels?(?!/audio/))/(?P<id>[^/?#&]+))'
259 _EMBED_REGEX
= [r
'<iframe[^>]+src=(["\'])(?P
<url
>(?
:https?
:)?
//(?
:www\
.)?instagram\
.com
/p
/[^
/]+/embed
.*?
)\
1']
261 'url
': 'https
://instagram
.com
/p
/aye83DjauH
/?foo
=bar
#abc',
262 'md5': '0d2da106a9d2631273e192b372806516',
266 'title': 'Video by naomipq',
267 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
268 'thumbnail': r
're:^https?://.*\.jpg',
270 'timestamp': 1371748545,
271 'upload_date': '20130620',
272 'uploader_id': '2815873',
273 'uploader': 'B E A U T Y F O R A S H E S',
274 'channel': 'naomipq',
276 'comment_count': int,
279 'expected_warnings': [
280 'General metadata extraction failed',
281 'Main webpage is locked behind the login page',
285 'url': 'https://www.instagram.com/reel/Chunk8-jurw/',
286 'md5': 'f6d8277f74515fa3ff9f5791426e42b1',
290 'title': 'Video by instagram',
291 'description': 'md5:c9cde483606ed6f80fbe9283a6a2b290',
292 'thumbnail': r
're:^https?://.*\.jpg',
294 'timestamp': 1661529231,
295 'upload_date': '20220826',
296 'uploader_id': '25025320',
297 'uploader': 'Instagram',
298 'channel': 'instagram',
300 'comment_count': int,
303 'expected_warnings': [
304 'General metadata extraction failed',
305 'Main webpage is locked behind the login page',
309 'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
315 'thumbnail': r
're:^https?://.*\.jpg',
323 'thumbnail': r
're:^https?://.*\.jpg',
331 'thumbnail': r
're:^https?://.*\.jpg',
337 'title': 'Post by instagram',
338 'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
340 'expected_warnings': [
341 'General metadata extraction failed',
342 'Main webpage is locked behind the login page',
346 'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
350 'title': 'Fingerboarding Tricks with @cass.fb',
351 'thumbnail': r
're:^https?://.*\.jpg',
353 'timestamp': 1530032919,
354 'upload_date': '20180626',
355 'uploader_id': '25025320',
356 'uploader': 'Instagram',
357 'channel': 'instagram',
359 'comment_count': int,
361 'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
363 'expected_warnings': [
364 'General metadata extraction failed',
365 'Main webpage is locked behind the login page',
368 'url': 'https://instagram.com/p/-Cmh1cukG2/',
369 'only_matching': True,
371 'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
372 'only_matching': True,
374 'url': 'https://www.instagram.com/tv/aye83DjauH/',
375 'only_matching': True,
377 'url': 'https://www.instagram.com/reel/CDUMkliABpa/',
378 'only_matching': True,
380 'url': 'https://www.instagram.com/marvelskies.fc/reel/CWqAgUZgCku/',
381 'only_matching': True,
383 'url': 'https://www.instagram.com/reels/Cop84x6u7CP/',
384 'only_matching': True,
388 def _extract_embed_urls(cls
, url
, webpage
):
389 res
= tuple(super()._extract
_embed
_urls
(url
, webpage
))
393 mobj
= re
.search(r
'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\
1',
394 get_element_by_attribute('class', 'instagram
-media
', webpage) or '')
396 return [mobj.group('link
')]
398 def _real_extract(self, url):
399 video_id, url = self._match_valid_url(url).group('id', 'url
')
400 media, webpage = {}, ''
402 if self._get_cookies(url).get('sessionid
'):
403 info = traverse_obj(self._download_json(
404 f'{self
._API
_BASE
_URL
}/media
/{_id_to_pk(video_id
)}/info
/', video_id,
405 fatal=False, errnote='Video info extraction failed
',
406 note='Downloading video info
', headers=self._API_HEADERS), ('items
', 0))
409 return self._extract_product(media)
411 api_check = self._download_json(
412 f'{self
._API
_BASE
_URL
}/web
/get_ruling_for_content
/?content_type
=MEDIA
&target_id
={_id_to_pk(video_id
)}',
413 video_id, headers=self._API_HEADERS, fatal=False, note='Setting up session
', errnote=False) or {}
414 csrf_token = self._get_cookies('https
://www
.instagram
.com
').get('csrftoken
')
417 self.report_warning('No csrf token
set by Instagram API
', video_id)
419 csrf_token = csrf_token.value if api_check.get('status
') == 'ok
' else None
421 self.report_warning('Instagram API
is not granting access
', video_id)
424 'shortcode
': video_id,
425 'child_comment_count
': 3,
426 'fetch_comment_count
': 40,
427 'parent_comment_count
': 24,
428 'has_threaded_comments
': True,
430 general_info = self._download_json(
431 'https
://www
.instagram
.com
/graphql
/query
/', video_id, fatal=False, errnote=False,
434 'X
-CSRFToken
': csrf_token or '',
435 'X
-Requested
-With
': 'XMLHttpRequest
',
438 'query_hash
': '9f8827793ef34641b2fb195d4d41151c
',
439 'variables
': json.dumps(variables, separators=(',', ':')),
441 media.update(traverse_obj(general_info, ('data
', 'shortcode_media
')) or {})
444 self.report_warning('General metadata extraction
failed (some metadata might be missing
).', video_id)
445 webpage, urlh = self._download_webpage_handle(url, video_id)
446 shared_data = self._search_json(
447 r'window\
._sharedData\s
*=', webpage, 'shared data
', video_id, fatal=False) or {}
449 if shared_data and self._LOGIN_URL not in urlh.url:
450 media.update(traverse_obj(
451 shared_data, ('entry_data
', 'PostPage
', 0, 'graphql
', 'shortcode_media
'),
452 ('entry_data
', 'PostPage
', 0, 'media
'), expected_type=dict) or {})
454 self.report_warning('Main webpage
is locked behind the login page
. Retrying with embed
webpage (some metadata might be missing
).')
455 webpage = self._download_webpage(
456 f'{url}
/embed
/', video_id, note='Downloading embed webpage
', fatal=False) or ''
457 additional_data = self._search_json(
458 r'window\
.__additionalDataLoaded\s
*\
(\s
*[^
,]+,', webpage, 'additional data
', video_id, fatal=False)
459 if not additional_data and not media:
460 self.raise_login_required('Requested content
is not available
, rate
-limit reached
or login required
')
462 product_item = traverse_obj(additional_data, ('items
', 0), expected_type=dict)
464 media.update(product_item)
465 return self._extract_product(media)
467 media.update(traverse_obj(
468 additional_data, ('graphql
', 'shortcode_media
'), 'shortcode_media
', expected_type=dict) or {})
470 username = traverse_obj(media, ('owner
', 'username
')) or self._search_regex(
471 r'"owner"\s
*:\s
*{\s
*"username"\s
*:\s
*"(.+?)"', webpage, 'username
', fatal=False)
474 traverse_obj(media, ('edge_media_to_caption
', 'edges
', 0, 'node
', 'text
'), expected_type=str)
475 or media.get('caption
'))
477 description = self._search_regex(
478 r'"caption"\s
*:\s
*"(.+?)"', webpage, 'description
', default=None)
479 if description is not None:
480 description = lowercase_escape(description)
482 video_url = media.get('video_url
')
484 nodes = traverse_obj(media, ('edge_sidecar_to_children
', 'edges
', ..., 'node
'), expected_type=dict) or []
486 return self.playlist_result(
487 self._extract_nodes(nodes, True), video_id,
488 format_field(username, None, 'Post by
%s'), description)
490 video_url = self._og_search_video_url(webpage, secure=False)
494 'width
': self._get_dimension('width
', media, webpage),
495 'height
': self._get_dimension('height
', media, webpage),
497 dash = traverse_obj(media, ('dash_info
', 'video_dash_manifest
'))
499 formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash
'))
501 comment_data = traverse_obj(media, ('edge_media_to_parent_comment
', 'edges
'))
503 'author
': traverse_obj(comment_dict, ('node
', 'owner
', 'username
')),
504 'author_id
': traverse_obj(comment_dict, ('node
', 'owner
', 'id')),
505 'id': traverse_obj(comment_dict, ('node
', 'id')),
506 'text
': traverse_obj(comment_dict, ('node
', 'text
')),
507 'timestamp
': traverse_obj(comment_dict, ('node
', 'created_at
'), expected_type=int_or_none),
508 } for comment_dict in comment_data] if comment_data else None
510 display_resources = (
511 media.get('display_resources
')
512 or [{'src
': media.get(key)} for key in ('display_src
', 'display_url
')]
513 or [{'src
': self._og_search_thumbnail(webpage)}])
515 'url
': thumbnail['src
'],
516 'width
': thumbnail.get('config_width
'),
517 'height
': thumbnail.get('config_height
'),
518 } for thumbnail in display_resources if thumbnail.get('src
')]
523 'title
': media.get('title
') or f'Video by {username}
',
524 'description
': description,
525 'duration
': float_or_none(media.get('video_duration
')),
526 'timestamp
': traverse_obj(media, 'taken_at_timestamp
', 'date
', expected_type=int_or_none),
527 'uploader_id
': traverse_obj(media, ('owner
', 'id')),
528 'uploader
': traverse_obj(media, ('owner
', 'full_name
')),
530 'like_count
': self._get_count(media, 'likes
', 'preview_like
') or str_to_int(self._search_regex(
531 r'data
-log
-event
="likeCountClick"[^
>]*>[^\d
]*([\d
,\
.]+)', webpage, 'like count
', fatal=False)),
532 'comment_count
': self._get_count(media, 'comments
', 'preview_comment
', 'to_comment
', 'to_parent_comment
'),
533 'comments
': comments,
534 'thumbnails
': thumbnails,
536 'Referer
': 'https
://www
.instagram
.com
/',
541 class InstagramPlaylistBaseIE(InstagramBaseIE):
542 _gis_tmpl = None # used to cache GIS request type
def _parse_graphql(self, webpage, item_id):
    # Reads a webpage and returns its GraphQL data.
    raw_data = self._search_regex(
        r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data')
    return self._parse_json(raw_data, item_id)
551 def _extract_graphql(self, data, url):
552 # Parses GraphQL queries containing videos and generates a playlist.
553 uploader_id = self._match_id(url)
554 csrf_token = data['config
']['csrf_token
']
555 rhx_gis = data.get('rhx_gis
') or '3c7ca9dcefcf966d11dacf1f151335e8
'
558 for page_num in itertools.count(1):
563 variables.update(self._query_vars_for(data))
564 variables = json.dumps(variables)
567 gis_tmpls = [self._gis_tmpl]
572 f'{rhx_gis}
:{csrf_token}
',
573 '{}:{}:{}'.format(rhx_gis, csrf_token, self.get_param('http_headers
')['User
-Agent
']),
576 # try all of the ways to generate a GIS query, and not only use the
577 # first one that works, but cache it for future requests
578 for gis_tmpl in gis_tmpls:
580 json_data = self._download_json(
581 'https
://www
.instagram
.com
/graphql
/query
/', uploader_id,
582 f'Downloading JSON page {page_num}
', headers={
583 'X
-Requested
-With
': 'XMLHttpRequest
',
584 'X
-Instagram
-GIS
': hashlib.md5(
585 (f'{gis_tmpl}
:{variables}
').encode()).hexdigest(),
587 'query_hash
': self._QUERY_HASH,
588 'variables
': variables,
590 media = self._parse_timeline_from(json_data)
591 self._gis_tmpl = gis_tmpl
593 except ExtractorError as e:
594 # if it's an error caused by a bad query
, and there are
595 # more GIS templates to try, ignore it and keep trying
596 if isinstance(e
.cause
, HTTPError
) and e
.cause
.status
== 403:
597 if gis_tmpl
!= gis_tmpls
[-1]:
601 nodes
= traverse_obj(media
, ('edges', ..., 'node'), expected_type
=dict) or []
604 yield from self
._extract
_nodes
(nodes
)
606 has_next_page
= traverse_obj(media
, ('page_info', 'has_next_page'))
607 cursor
= traverse_obj(media
, ('page_info', 'end_cursor'), expected_type
=str)
608 if not has_next_page
or not cursor
:
def _real_extract(self, url):
    # The matched id is either a username or a hashtag, depending on subclass.
    playlist_id = self._match_id(url)
    webpage = self._download_webpage(url, playlist_id)
    graphql_data = self._parse_graphql(webpage, playlist_id)

    self._set_cookie('instagram.com', 'ig_pr', '1')

    return self.playlist_result(
        self._extract_graphql(graphql_data, url), playlist_id, playlist_id)
622 class InstagramUserIE(InstagramPlaylistBaseIE
):
624 _VALID_URL
= r
'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
625 IE_DESC
= 'Instagram user profile'
626 IE_NAME
= 'instagram:user'
628 'url': 'https://instagram.com/porsche',
635 'extract_flat': True,
636 'skip_download': True,
641 _QUERY_HASH
= ('42323d64886122307be10013ad2dcc44',)
def _parse_timeline_from(data):
    # Extract the media timeline section from a GraphQL user result.
    user = data['data']['user']
    return user['edge_owner_to_timeline_media']
def _query_vars_for(data):
    # Build the extra timeline-query variables from the GraphQL of the
    # original profile page (the user's numeric id).
    profile = data['entry_data']['ProfilePage'][0]['graphql']['user']
    return {'id': profile['id']}
657 class InstagramTagIE(InstagramPlaylistBaseIE
):
658 _VALID_URL
= r
'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
659 IE_DESC
= 'Instagram hashtag search URLs'
660 IE_NAME
= 'instagram:tag'
662 'url': 'https://instagram.com/explore/tags/lolcats',
667 'playlist_count': 50,
669 'extract_flat': True,
670 'skip_download': True,
675 _QUERY_HASH
= ('f92f56d47dc7a55b606908374b43a314',)
678 def _parse_timeline_from(data
):
679 # extracts the media timeline data from a GraphQL result
680 return data
['data']['hashtag']['edge_hashtag_to_media']
def _query_vars_for(data):
    # Build the extra timeline-query variables from the GraphQL of the
    # original tag page (the hashtag's name).
    hashtag = data['entry_data']['TagPage'][0]['graphql']['hashtag']
    return {'tag_name': hashtag['name']}
692 class InstagramStoryIE(InstagramBaseIE
):
693 _VALID_URL
= r
'https?://(?:www\.)?instagram\.com/stories/(?P<user>[^/]+)/(?P<id>\d+)'
694 IE_NAME
= 'instagram:story'
697 'url': 'https://www.instagram.com/stories/highlights/18090946048123978/',
699 'id': '18090946048123978',
702 'playlist_mincount': 50,
705 def _real_extract(self
, url
):
706 username
, story_id
= self
._match
_valid
_url
(url
).groups()
707 story_info
= self
._download
_webpage
(url
, story_id
)
708 user_info
= self
._search
_json
(r
'"user":', story_info
, 'user info', story_id
, fatal
=False)
710 self
.raise_login_required('This content is unreachable')
712 user_id
= traverse_obj(user_info
, 'pk', 'id', expected_type
=str)
713 story_info_url
= user_id
if username
!= 'highlights' else f
'highlight:{story_id}'
714 if not story_info_url
: # user id is only mandatory for non-highlights
715 raise ExtractorError('Unable to extract user id')
717 videos
= traverse_obj(self
._download
_json
(
718 f
'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}',
719 story_id
, errnote
=False, fatal
=False, headers
=self
._API
_HEADERS
), 'reels')
721 self
.raise_login_required('You need to log in to access this content')
723 full_name
= traverse_obj(videos
, (f
'highlight:{story_id}', 'user', 'full_name'), (user_id
, 'user', 'full_name'))
724 story_title
= traverse_obj(videos
, (f
'highlight:{story_id}', 'title'))
726 story_title
= f
'Story by {username}'
728 highlights
= traverse_obj(videos
, (f
'highlight:{story_id}', 'items'), (user_id
, 'items'))
730 for highlight
in highlights
:
731 highlight_data
= self
._extract
_product
(highlight
)
732 if highlight_data
.get('formats'):
734 'uploader': full_name
,
735 'uploader_id': user_id
,
736 **filter_dict(highlight_data
),
738 return self
.playlist_result(info_data
, playlist_id
=story_id
, playlist_title
=story_title
)