from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    PerRequestProxyHandler,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLRedirectHandler,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), it
    has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
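
    A minimal usage sketch (the options and URL are illustrative):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])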

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats      Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process (default)
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                    playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
                       (See the usage sketch at the end of this option list.)
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone, or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils.py is one example for this.
                       (See the usage sketch at the end of this option list.)
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
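
    Usage sketch for the hook and filter options above (the values and the
    rejection message are illustrative):

        def longer_than_a_minute(info_dict, *, incomplete):
            duration = info_dict.get('duration')
            if duration and duration < 60:
                return 'The video is too short'  # returning a message skips the video

        def my_progress_hook(d):
            if d['status'] == 'finished':
                print('Done downloading', d['filename'])

        ydl_opts = {
            'match_filter': longer_than_a_minute,
            'progress_hooks': [my_progress_hook],
            'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}],
        }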
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n                    You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecated_feature(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))
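        # Each archive entry is a plain-text line of the form
        # '<extractor key, lowercased> <video id>', e.g. 'youtube dQw4w9WgXcQ'
        # (illustrative id); in_download_archive() checks against these lines.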

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                [sys.argv[0]]
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback
, *args
, **kwargs
):
980 return self
._format
_text
(self
._out
_files
.out
, self
._allow
_colors
.out
, *args
, **kwargs
)
982 def _format_screen(self
, *args
, **kwargs
):
983 return self
._format
_text
(self
._out
_files
.screen
, self
._allow
_colors
.screen
, *args
, **kwargs
)
985 def _format_err(self
, *args
, **kwargs
):
986 return self
._format
_text
(self
._out
_files
.error
, self
._allow
_colors
.error
, *args
, **kwargs
)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr, it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or Print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
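
        # Rough examples of what this grammar accepts (illustrative; see the
        # "OUTPUT TEMPLATE" section of the README for the authoritative syntax):
        #     %(title)s               - plain field
        #     %(duration>%H-%M-%S)s   - strftime formatting after '>'
        #     %(uploader|Unknown)s    - literal default after '|'
        #     %(filesize+1024)s       - arithmetic on numeric fields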

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0]
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na

            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            if value is None:
                value = default
            elif replacement is not None:
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value = na

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
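
    # e.g. evaluate_outtmpl('%(id)s.%(ext)s', info_dict) -> 'dQw4w9WgXcQ.mp4'
    # (a sketch; assumes info_dict carries 'id' and 'ext')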

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        @param url          URL to extract

        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
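
        A typical embedding call (a sketch):
            with YoutubeDL() as ydl:
                info = ydl.extract_info(url, download=False)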
1495 if extra_info
is None:
1498 if not ie_key
and force_generic_extractor
:
1502 ies
= {ie_key
: self
._ies
[ie_key
]} if ie_key
in self
._ies
else {}
1506 for key
, ie
in ies
.items():
1507 if not ie
.suitable(url
):
1510 if not ie
.working():
1511 self
.report_warning('The program functionality for this site has been marked as broken, '
1512 'and will probably not work.')
1514 temp_id
= ie
.get_temp_id(url
)
1515 if temp_id
is not None and self
.in_download_archive({'id': temp_id
, 'ie_key': key
}):
1516 self
.to_screen(f
'[{key}] {temp_id}: has already been recorded in the archive')
1517 if self
.params
.get('break_on_existing', False):
1518 raise ExistingVideoReached()
1520 return self
.__extract
_info
(url
, self
.get_info_extractor(key
), download
, extra_info
, process
)
1522 extractors_restricted
= self
.params
.get('allowed_extractors') not in (None, ['default'])
1523 self
.report_error(f
'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1524 tb
=False if extractors_restricted
else None)
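    # Usage sketch (illustrative) for embedding: extract metadata without
    # downloading. The keys shown are standard info-dict fields.
    #
    #   with YoutubeDL({'quiet': True}) as ydl:
    #       info = ydl.extract_info('https://example.com/watch?v=xyz', download=False)
    #       print(info.get('id'), info.get('title'))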
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper
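    # Note on the decorator above: the `while True` loop re-runs the wrapped
    # call only when it raises ReExtractInfo (e.g. after a wait period ends);
    # every other handler either re-raises or reports the error and falls
    # through to `break`, so a successful call returns on the first iteration.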
    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise
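    # Illustrative: 'wait_for_video' is a (min_secs, max_secs) tuple. With
    #   ydl_opts = {'wait_for_video': (60, 300)}   # example values
    # an upcoming live with no known release time waits a random 60-300s,
    # then retries extraction via ReExtractInfo.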
    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })
    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists maynot have webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
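    # Dispatch summary for the method above: 'video' results are finalized via
    # process_video_result; 'url' results are re-queued through extract_info;
    # 'url_transparent' merges the outer metadata over the inner extraction;
    # 'playlist'/'multi_video' recurse per entry with loop protection; and
    # 'compat_list' is the legacy youtube-dl path.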
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)
    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }
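    # Example (illustrative values) of the strict infodict for a playlist result:
    #   {'playlist_count': 42, 'playlist': 'My Mix', 'playlist_id': 'PL123',
    #    'playlist_title': 'My Mix', 'playlist_uploader': 'someone',
    #    'playlist_uploader_id': 'uc123'}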
    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
            if not ie_result.get('playlist_count'):
                # Better to do this after potentially exhausting entries
                ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatabilty with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result
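    # Note: with 'lazy_playlist' the entries generator is consumed one item at
    # a time, so n_entries stays 'N/A' and playlistreverse/playlistrandom
    # cannot apply; without it, the full entry list is resolved up front and
    # requested_entries/entries are populated before any download starts.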
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)

            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
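    # In effect: 'best/bestvideo+bestaudio' when merging is not usable (no
    # working FFmpegMergerPP, a live stream without live_from_start, or output
    # to stdout); 'bestvideo+bestaudio/best' in compat mode; and the default
    # 'bestvideo*+bestaudio/best' otherwise.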
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        check_formats = self.params.get('check_formats') == 'selected'

        def _parse_filter(tokens):
            filter_parts = []
            for type, string_, start, _, _ in tokens:
                if type == tokenize.OP and string_ == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string_)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string_, start, end, line in tokens:
                if type == tokenize.OP and string_ == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string_, start, end, line in tokens:
                        yield type, string_, start, end, line
                        if type == tokenize.OP and string_ == ']':
                            break
                elif type == tokenize.OP and string_ in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string_
                        last_start = start
                        last_end = end
                    else:
                        last_string += string_
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string_, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string_, [])
                elif type == tokenize.OP:
                    if string_ == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string_ in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string_ == ',':
                        tokens.restore_last_token()
                        break
                    elif string_ == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string_ == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string_ == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string_ == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string_ == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string_}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels')
                })

            return new_dict

        def _check_formats(formats):
            if not check_formats:
                yield from formats
                return
            yield from self._check_formats(formats)

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if not matches:
                            if format_fallback and ctx['incomplete_formats']:
                                # for extractors with incomplete formats (audio only (soundcloud)
                                # or video only (imgur)) best/worst will fallback to
                                # best/worst {video,audio}-only format
                                matches = formats
                            elif seperate_fallback and not ctx['has_merged_format']:
                                # for compatibility with youtube-dl when there is no pre-merged format
                                matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode())
        try:
            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
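    # Usage sketch (illustrative; the ctx values are made up):
    #
    #   selector = ydl.build_format_selector('bv*+ba/b')
    #   ctx = {'formats': formats,            # candidate format dicts
    #          'incomplete_formats': False,   # site only has av-split formats?
    #          'has_merged_format': True}     # a pre-merged format exists?
    #   chosen = list(selector(ctx))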
    def _calc_headers(self, info_dict):
        res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
        if 'Youtubedl-No-Compression' in res:  # deprecated
            res.pop('Youtubedl-No-Compression', None)
            res['Accept-Encoding'] = 'identity'
        cookies = self._calc_cookies(info_dict['url'])
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        pr = sanitized_Request(url)
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails
    def _fill_common_fields(self, info_dict, final=True):
        # TODO: move sanitization here
        if final:
            title = info_dict['fulltitle'] = info_dict.get('title')
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'

        info_dict['live_status'] = live_status
        for key in live_keys:
            if info_dict.get(key) is None:
                info_dict[key] = (live_status == key)
        if live_status == 'post_live':
            info_dict['was_live'] = True

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
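    # Example of the timestamp-to-date fill above: an info dict carrying
    # {'timestamp': 1577836800} and no 'upload_date' gains
    # 'upload_date': '20200101' (the timestamp is interpreted as UTC).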
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)
    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        formats = self._get_formats(info_dict)

        # Backward compatibility with InfoExtractor._sort_formats
        field_preference = (formats or [{}])[0].pop('__sort_fields', None)
        if field_preference:
            info_dict['_format_sort_fields'] = field_preference

        # or None ensures --clean-infojson removes it
        info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm')]

        if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
            self.report_warning(
                f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
                'only images are available for download. Use --list-formats to see them'.capitalize())

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        for format in formats:
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            if (info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
            format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict))

        # This is copied to http_headers by the above _calc_headers and can now be removed
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        self.sort_formats({
            'formats': formats,
            '_format_sort_fields': info_dict.get('_format_sort_fields'),
        })

        # Sanitize and group by format_id
        formats_dict = {}
        for i, format in enumerate(formats):
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            formats_dict.setdefault(format['format_id'], []).append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

                if format.get('format') is None:
                    format['format'] = '{id} - {res}{note}'.format(
                        id=format['format_id'],
                        res=self.format_resolution(format),
                        note=format_field(format, 'format_note', ' (%s)'),
                    )

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = self._get_formats(info_dict)

        list_only = self.params.get('simulate') == 'list_only'
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict)
            return info_dict

        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        while True:
            if interactive_format_selection:
                req_format = input(
                    self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format)
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (
                    # All formats are video-only or
                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                    # all formats are audio-only
                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        # duration may not be accurate. So allow deviations <1sec
                        'section_end': end_time if end_time <= offset + duration + 1 else None,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict
    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitlelangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
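    # Illustrative: with {'writesubtitles': True, 'subtitleslangs': ['en.*', 'ja']}
    # the requested languages are resolved by orderedSet_from_options (regexes
    # allowed, 'all' expands to every language); the default branch instead
    # prefers 'en' variants and falls back to the first available language.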
    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)

        return info_copy
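    # format_tmpl examples: 'title' -> '%(title)s'; 'title,id' ->
    # '%(title)s\n%(id)s'; 'id=' -> 'id = %(id)#j'; '{id,title}' ->
    # '%(.{id,title})j'; anything not matching the pattern passes through
    # unchanged as an output template.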
    def __forced_printings(self, info_dict, filename=None, incomplete=True):
        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        if filename:
            info_dict['filename'] = filename
        info_copy = self._forceprint('video', info_dict)

        def print_field(field, actual_field=None, optional=False):
            if actual_field is None:
                actual_field = field
            if self.params.get(f'force{field}') and (
                    info_copy.get(field) is not None or (not optional and not incomplete)):
                self.to_stdout(info_copy[actual_field])

        print_field('title')
        print_field('id')
        print_field('url', 'urls')
        print_field('thumbnail', optional=True)
        print_field('description', optional=True)
        if filename:
            print_field('filename')
        if self.params.get('forceduration') and info_copy.get('duration') is not None:
            self.to_stdout(formatSeconds(info_copy['duration']))
        print_field('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)

    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None
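    # Note on dl(): in test mode (used by _check_formats) the downloader runs
    # with a pared-down param set - quiet, no .part/.ytdl files, overwrites
    # enabled - so probing a format cannot disturb a real download in progress.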
2994 def process_info(self
, info_dict
):
2995 """Process a single resolved IE result. (Modifies it in-place)"""
2997 assert info_dict
.get('_type', 'video') == 'video'
2998 original_infodict
= info_dict
3000 if 'format' not in info_dict
and 'ext' in info_dict
:
3001 info_dict
['format'] = info_dict
['ext']
3003 if self
._match
_entry
(info_dict
) is not None:
3004 info_dict
['__write_download_archive'] = 'ignore'
3007 # Does nothing under normal operation - for backward compatibility of process_info
3008 self
.post_extract(info_dict
)
3010 def replace_info_dict(new_info
):
3012 if new_info
== info_dict
:
3015 info_dict
.update(new_info
)
3017 new_info
, _
= self
.pre_process(info_dict
, 'video')
3018 replace_info_dict(new_info
)
3019 self
._num
_downloads
+= 1
3021 # info_dict['_filename'] needs to be set for backward compatibility
3022 info_dict
['_filename'] = full_filename
= self
.prepare_filename(info_dict
, warn
=True)
3023 temp_filename
= self
.prepare_filename(info_dict
, 'temp')
3027 self
.__forced
_printings
(info_dict
, full_filename
, incomplete
=('format' not in info_dict
))
        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:
                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    requested_formats = info_dict['requested_formats']
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in requested_formats if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        return
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()

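    # Overview (added for clarity): for each video, process_info runs roughly:
    # match-filter check -> 'video' pre-processors -> forced printings ->
    # side files (description, subtitles, thumbnails, infojson, shortcuts) ->
    # 'before_dl' pre-processors -> download (merging formats if needed) ->
    # ffmpeg fixups -> 'post_process' and 'after_move' post-processors ->
    # download-archive bookkeeping.
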
    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode

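    # Usage sketch (illustrative; not part of the original source), assuming the
    # public API of the package:
    #
    #   from yt_dlp import YoutubeDL
    #   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       retcode = ydl.download('https://www.youtube.com/watch?v=BaW_jenozKc')
    #
    # A bare string is accepted too, since variadic() wraps it into a list.
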
    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
        return self._download_retcode

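    # Note (added for clarity): this is the backend of --load-info-json. The
    # loaded dict is re-sanitized, and if processing fails with a recoverable
    # error, extraction is retried from the recorded 'webpage_url'.
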
    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': REPOSITORY,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber', '_format_sort_fields',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)

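    # Usage sketch (illustrative): with remove_private_keys=True, internal and
    # derived fields are dropped so the result can be safely serialized:
    #
    #   clean = YoutubeDL.sanitize_info(info, remove_private_keys=True)
    #   json.dumps(clean)  # values are now dict/list/str/int/float/bool only
    #
    # Anything else (e.g. extractor objects) is replaced by its repr().
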
    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict

    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)

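    # Note (added for clarity): pre_process/run_all_pps/post_process implement
    # the postprocessor lifecycle: 'pre_process' and 'before_dl' hooks run before
    # download; 'post_process' runs the configured PPs plus any queued in
    # info['__postprocessors']; files are then moved into place and the
    # 'after_move' hooks see the final filepath.
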
    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)

    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)

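    # Example (added for clarity): an archive entry is the extractor key in
    # lower case followed by the video ID, one per line, e.g.
    #
    #   youtube BaW_jenozKc
    #
    # as produced by make_archive_id() and tested by in_download_archive().
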
    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default

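    # Examples (added for clarity): {'width': 1920, 'height': 1080} -> '1920x1080',
    # {'height': 720} -> '720p', {'width': 640} -> '640x?', and a format with
    # vcodec == 'none' but a real acodec -> 'audio only'.
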
    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']

    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return
        if self.params.get('listformats_table', True) is False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field, 'unknown')
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim,
                format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(
                    self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
                    self._format_out('DRM', 'light red') if f.get('has_drm') else None,
                    format_field(f, 'language', '[%s]'),
                    join_nonempty(format_field(f, 'format_note'),
                                  format_field(f, 'container', ignore=(None, f.get('ext'))),
                                  delim=', '),
                    delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

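    # Note (added for clarity): the '\t' markers inside the cell templates are
    # interpreted by render_table(), which right-aligns the text following them;
    # they do not appear as literal tabs in the rendered table.
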
    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = sanitized_Request(req)
        return self._opener.open(req, timeout=self._socket_timeout)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
            f'{CHANNEL}@{__version__}',
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        self._setup_opener()
        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_debug(f'Proxy map: {proxy_map}')

        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    def _setup_opener(self):
        if hasattr(self, '_opener'):
            return
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)

        opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        redirect_handler = YoutubeDLRedirectHandler()
        data_handler = urllib.request.DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/ytdl-org/youtube-dl/issues/8227)
        file_handler = urllib.request.FileHandler()

        if not self.params.get('enable_file_urls'):
            def file_open(*args, **kwargs):
                raise urllib.error.URLError(
                    'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
                    'Use --enable-file-urls to enable at your own risk.')
            file_handler.file_open = file_open

        opener = urllib.request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener

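    # Note (added for clarity): the opener chains proxy, HTTPS, cookie, redirect,
    # data: and file: handlers. file:// is stubbed out unless 'enable_file_urls'
    # is set, and the default urllib User-Agent header is removed so that
    # per-request headers stay fully under yt-dlp's control.
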
    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    thumbnails.pop(idx)
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all:
                break
        return ret
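
    # Note (added for clarity): thumbnails are tried from the highest-preference
    # entry downwards (the list is iterated in reverse); unless
    # 'write_all_thumbnails' is set, the loop stops after the first success.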