import json
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_qs,
    traverse_obj,
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
    urlencode_postdata,
)


class RedditIE(InfoExtractor):
    _NETRC_MACHINE = 'reddit'
    _VALID_URL = r'https?://(?:\w+\.)?reddit(?:media)?\.com/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
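    # 'slug' keeps the full post path (optional r/<sub>/ or user/<name>/ prefix
    # plus comments/<id>) for the API request; 'id' is the post ID itself.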
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'channel_id': 'videos',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p fallback format
        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
        'md5': '8b5902cfda3006bf90faea7adf765a49',
        'info_dict': {
            'id': 'gyh95hiqc0b11',
            'display_id': '90bu6w',
            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1532051078,
            'upload_date': '20180720',
            'uploader': 'FootLoosePickleJuice',
        },
    }, {
        'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
        'info_dict': {
            'id': 'zasobba6wp071',
            'display_id': 'nip71r',
            'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:5',
            'timestamp': 1621709093,
            'upload_date': '20210522',
            'uploader': 'creepyt0es',
            'channel_id': 'u_creepyt0es',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # videos embedded in reddit text post
        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
        'info_dict': {
            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
        },
    }, {
        # crossposted reddit-hosted media
        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
        'md5': '746180895c7b75a9d6b05341f507699a',
        'info_dict': {
            'id': 'a1oneun6pa5a1',
            'display_id': 'zjjw82',
            'uploader': 'Otaku-senpai69420',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'upload_date': '20221212',
            'timestamp': 1670812309,
            'dislike_count': int,
            'comment_count': int,
            'channel_id': 'dumbfuckers_club',
        },
    }, {
        # post link without subreddit
        'url': 'https://www.reddit.com/comments/124pp33',
        'md5': '15eec9d828adcef4468b741a7e45a395',
        'info_dict': {
            'id': 'antsenjc2jqa1',
            'display_id': '124pp33',
            'title': 'Harmless prank of some old friends',
            'uploader': 'Dudezila',
            'channel_id': 'ContagiousLaughter',
            'upload_date': '20230328',
            'timestamp': 1680012043,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'comment_count': int,
            'dislike_count': int,
        },
    }, {
        # quarantined subreddit post
        'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
        'md5': '3156ea69e3c1f1b6259683c5abd36e71',
        'info_dict': {
            'id': '8bwtclfggpsa1',
            'display_id': '12fujy3',
            'title': 'Based Hasan?',
            'uploader': 'KingNigelXLII',
            'channel_id': 'GenZedong',
            'upload_date': '20230408',
            'timestamp': 1680979138,
            'comment_count': int,
            'dislike_count': int,
        },
        'skip': 'Requires account that has opted-in to the GenZedong subreddit',
    }, {
        # subtitles in HLS manifest
        'url': 'https://www.reddit.com/r/Unexpected/comments/1cl9h0u/the_insurance_claim_will_be_interesting/',
        'info_dict': {
            'id': 'a2mdj5d57qyc1',
            'display_id': '1cl9h0u',
            'title': 'The insurance claim will be interesting',
            'uploader': 'darrenpauli',
            'channel_id': 'Unexpected',
            'upload_date': '20240506',
            'timestamp': 1714966382,
            'comment_count': int,
            'dislike_count': int,
            'subtitles': {'en': 'mincount:1'},
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles from caption-url
        'url': 'https://www.reddit.com/r/soccer/comments/1cxwzso/tottenham_1_0_newcastle_united_james_maddison_31/',
        'info_dict': {
            'id': 'xbmj4t3igy1d1',
            'display_id': '1cxwzso',
            'title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
            'uploader': 'Woodstovia',
            'channel_id': 'soccer',
            'upload_date': '20240522',
            'timestamp': 1716373798,
            'comment_count': int,
            'dislike_count': int,
            'subtitles': {'en': 'mincount:1'},
        },
        'params': {
            'skip_download': True,
            'writesubtitles': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]
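
    # _perform_login is invoked by yt-dlp when credentials are supplied via
    # --username/--password or a .netrc entry for machine 'reddit'.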
    def _perform_login(self, username, password):
        captcha = self._download_json(
            'https://www.reddit.com/api/requires_captcha/login.json', None,
            'Checking login requirement')['required']
        if captcha:
            raise ExtractorError('Reddit is requiring captcha before login', expected=True)
        login = self._download_json(
            f'https://www.reddit.com/api/login/{username}', None, data=urlencode_postdata({
                'op': 'login-main',
                'user': username,
                'passwd': password,
                'api_type': 'json',
            }), note='Logging in', errnote='Login request failed')
        errors = '; '.join(traverse_obj(login, ('json', 'errors', ..., 1)))
        if errors:
            raise ExtractorError(f'Unable to login, Reddit API says {errors}', expected=True)
        elif not traverse_obj(login, ('json', 'data', 'cookie', {str})):
            raise ExtractorError('Unable to login, no cookie was returned')
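
    # extract_subtitles() in InfoExtractor only calls this when subtitle
    # extraction is actually requested, so the validity request below is
    # skipped otherwise.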
    def _get_subtitles(self, video_id):
        # Fallback if there were no subtitles provided by DASH or HLS manifests
        caption_url = f'https://v.redd.it/{video_id}/wh_ben_en.vtt'
        if self._is_valid_url(caption_url, video_id, item='subtitles'):
            return {'en': [{'url': caption_url}]}

    def _real_extract(self, url):
        slug, video_id = self._match_valid_url(url).group('slug', 'id')

        try:
            data = self._download_json(
                f'https://www.reddit.com/{slug}/.json', video_id, expected_status=403)
        except ExtractorError as e:
            if isinstance(e.cause, json.JSONDecodeError):
                if self._get_cookies('https://www.reddit.com/').get('reddit_session'):
                    raise ExtractorError('Your IP address is unable to access the Reddit API', expected=True)
                self.raise_login_required('Account authentication is required')
            raise
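
        # A 403 still returns a parseable JSON body (hence expected_status),
        # which is inspected below for the specific refusal reason.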
        if traverse_obj(data, 'error') == 403:
            reason = data.get('reason')
            if reason == 'quarantined':
                self.raise_login_required('Quarantined subreddit; an account that has opted in is required')
            elif reason == 'private':
                self.raise_login_required('Private subreddit; an account that has been approved is required')
            else:
                raise ExtractorError(f'HTTP Error 403 Forbidden; reason given: {reason}')
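
        # The listing response nests the post under [0].data.children[0].data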
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'http_headers': {'Accept': '*/*'},
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'channel_id': data.get('subreddit'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

        parsed_url = urllib.parse.urlparse(video_url)

        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
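            # Reddit-hosted videos in text posts are listed under media_metadata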
            entries = []
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, info.get('title'))
            raise ExtractorError('No media found', expected=True)

        # Check if media is hosted on reddit:
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
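            # The 'f' query parameter controls what the generated HLS master
            # playlist contains; appending 'subsAll' also requests subtitle
            # renditions (defaulting to 'hd,subsAll' when no value is present).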
            qs = traverse_obj(parse_qs(hls_playlist_url), {
                'f': ('f', 0, {lambda x: ','.join([x, 'subsAll']) if x else 'hd,subsAll'}),
            })
            hls_playlist_url = update_url_query(hls_playlist_url, qs)
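
            # fallback_url is a single progressive MP4 of the video track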
            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]

            hls_fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False)
            formats.extend(hls_fmts)
            dash_fmts, dash_subs = self._extract_mpd_formats_and_subtitles(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False)
            formats.extend(dash_fmts)
            self._merge_subtitles(dash_subs, target=subtitles)

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'subtitles': subtitles or self.extract_subtitles(video_id),
                'duration': int_or_none(reddit_video.get('duration')),
            }
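
        # A v.redd.it URL without reddit_video data means the upload is still
        # being transcoded, so no formats are available yet.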
        if parsed_url.netloc == 'v.redd.it':
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit, must continue extraction
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }