yt_dlp/extractor/reddit.py
import json
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_qs,
    traverse_obj,
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
    urlencode_postdata,
)


class RedditIE(InfoExtractor):
    _NETRC_MACHINE = 'reddit'
    _VALID_URL = r'https?://(?:\w+\.)?reddit(?:media)?\.com/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'videos',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p fallback format
        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
        'md5': '8b5902cfda3006bf90faea7adf765a49',
        'info_dict': {
            'id': 'gyh95hiqc0b11',
            'ext': 'mp4',
            'display_id': '90bu6w',
            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1532051078,
            'upload_date': '20180720',
            'uploader': 'FootLoosePickleJuice',
            'duration': 14,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'aww',
        },
    }, {
        # User post
        'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
        'info_dict': {
            'id': 'zasobba6wp071',
            'ext': 'mp4',
            'display_id': 'nip71r',
            'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:5',
            'timestamp': 1621709093,
            'upload_date': '20210522',
            'uploader': 'creepyt0es',
            'duration': 6,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'channel_id': 'u_creepyt0es',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # videos embedded in reddit text post
        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
        'playlist_count': 2,
        'info_dict': {
            'id': 'wzqkxp',
            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
        },
    }, {
        # crossposted reddit-hosted media
        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
        'md5': '746180895c7b75a9d6b05341f507699a',
        'info_dict': {
            'id': 'a1oneun6pa5a1',
            'ext': 'mp4',
            'display_id': 'zjjw82',
            'title': 'Cringe',
            'uploader': 'Otaku-senpai69420',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'upload_date': '20221212',
            'timestamp': 1670812309,
            'duration': 16,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'dumbfuckers_club',
        },
    }, {
        # post link without subreddit
        'url': 'https://www.reddit.com/comments/124pp33',
        'md5': '15eec9d828adcef4468b741a7e45a395',
        'info_dict': {
            'id': 'antsenjc2jqa1',
            'ext': 'mp4',
            'display_id': '124pp33',
            'title': 'Harmless prank of some old friends',
            'uploader': 'Dudezila',
            'channel_id': 'ContagiousLaughter',
            'duration': 17,
            'upload_date': '20230328',
            'timestamp': 1680012043,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
        },
    }, {
        # quarantined subreddit post
        'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
        'md5': '3156ea69e3c1f1b6259683c5abd36e71',
        'info_dict': {
            'id': '8bwtclfggpsa1',
            'ext': 'mp4',
            'display_id': '12fujy3',
            'title': 'Based Hasan?',
            'uploader': 'KingNigelXLII',
            'channel_id': 'GenZedong',
            'duration': 16,
            'upload_date': '20230408',
            'timestamp': 1680979138,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
        },
        'skip': 'Requires account that has opted-in to the GenZedong subreddit',
    }, {
        # subtitles in HLS manifest
        'url': 'https://www.reddit.com/r/Unexpected/comments/1cl9h0u/the_insurance_claim_will_be_interesting/',
        'info_dict': {
            'id': 'a2mdj5d57qyc1',
            'ext': 'mp4',
            'display_id': '1cl9h0u',
            'title': 'The insurance claim will be interesting',
            'uploader': 'darrenpauli',
            'channel_id': 'Unexpected',
            'duration': 53,
            'upload_date': '20240506',
            'timestamp': 1714966382,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
            'subtitles': {'en': 'mincount:1'},
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles from caption-url
        'url': 'https://www.reddit.com/r/soccer/comments/1cxwzso/tottenham_1_0_newcastle_united_james_maddison_31/',
        'info_dict': {
            'id': 'xbmj4t3igy1d1',
            'ext': 'mp4',
            'display_id': '1cxwzso',
            'title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
            'uploader': 'Woodstovia',
            'channel_id': 'soccer',
            'duration': 30,
            'upload_date': '20240522',
            'timestamp': 1716373798,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
            'subtitles': {'en': 'mincount:1'},
        },
        'params': {
            'skip_download': True,
            'writesubtitles': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

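    # Login goes through Reddit's legacy form-login API; if Reddit demands a captcha
    # there is nothing we can do from here, so fail early with a clear message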
    def _perform_login(self, username, password):
        captcha = self._download_json(
            'https://www.reddit.com/api/requires_captcha/login.json', None,
            'Checking login requirement')['required']
        if captcha:
            raise ExtractorError('Reddit is requiring captcha before login', expected=True)
        login = self._download_json(
            f'https://www.reddit.com/api/login/{username}', None, data=urlencode_postdata({
                'op': 'login-main',
                'user': username,
                'passwd': password,
                'api_type': 'json',
            }), note='Logging in', errnote='Login request failed')
        errors = '; '.join(traverse_obj(login, ('json', 'errors', ..., 1)))
        if errors:
            raise ExtractorError(f'Unable to login, Reddit API says {errors}', expected=True)
        elif not traverse_obj(login, ('json', 'data', 'cookie', {str})):
            raise ExtractorError('Unable to login, no cookie was returned')

    def _get_subtitles(self, video_id):
        # Fallback if there were no subtitles provided by DASH or HLS manifests
        caption_url = f'https://v.redd.it/{video_id}/wh_ben_en.vtt'
        if self._is_valid_url(caption_url, video_id, item='subtitles'):
            return {'en': [{'url': caption_url}]}

    def _real_extract(self, url):
        slug, video_id = self._match_valid_url(url).group('slug', 'id')

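        # Appending /.json to a post's permalink returns the post and its comment
        # listings as plain JSON, which is what gets parsed below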
        try:
            data = self._download_json(
                f'https://www.reddit.com/{slug}/.json', video_id, expected_status=403)
        except ExtractorError as e:
            if isinstance(e.cause, json.JSONDecodeError):
                if self._get_cookies('https://www.reddit.com/').get('reddit_session'):
                    raise ExtractorError('Your IP address is unable to access the Reddit API', expected=True)
                self.raise_login_required('Account authentication is required')
            raise

        if traverse_obj(data, 'error') == 403:
            reason = data.get('reason')
            if reason == 'quarantined':
                self.raise_login_required('Quarantined subreddit; an account that has opted in is required')
            elif reason == 'private':
                self.raise_login_required('Private subreddit; an account that has been approved is required')
            else:
                raise ExtractorError(f'HTTP Error 403 Forbidden; reason given: {reason}')

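        # The response is a list of listings; the first child of the first listing
        # is the submission itself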
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'http_headers': {'Accept': '*/*'},
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'channel_id': data.get('subreddit'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

        parsed_url = urllib.parse.urlparse(video_url)

        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
            entries = []
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, info.get('title'))
            raise ExtractorError('No media found', expected=True)

        # Check if media is hosted on reddit:
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
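            # Request the 'subsAll' variant of the HLS playlist; this appears to make
            # Reddit include subtitle renditions in the manifest (cf. the HLS-subtitles test above)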
            qs = traverse_obj(parse_qs(hls_playlist_url), {
                'f': ('f', 0, {lambda x: ','.join([x, 'subsAll']) if x else 'hd,subsAll'}),
            })
            hls_playlist_url = update_url_query(hls_playlist_url, qs)

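            # The fallback_url points at a video-only MP4 (audio comes from the DASH/HLS
            # streams), hence 'acodec': 'none'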
            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]
            hls_fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False)
            formats.extend(hls_fmts)
            dash_fmts, dash_subs = self._extract_mpd_formats_and_subtitles(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False)
            formats.extend(dash_fmts)
            self._merge_subtitles(dash_subs, target=subtitles)

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'subtitles': subtitles or self.extract_subtitles(video_id),
                'duration': int_or_none(reddit_video.get('duration')),
            }

        if parsed_url.netloc == 'v.redd.it':
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit, must continue extraction
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }