import concurrent.futures
import contextlib
import json
import math
import os
import struct
import time

from .common import FileDownloader
from .http import HttpFD
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_os_name
from ..networking import Request
from ..networking.exceptions import HTTPError, IncompleteRead
from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj
from ..utils.networking import HTTPHeaderDict
from ..utils.progress import ProgressCalculator
class HttpQuietDownloader(HttpFD):
    """An HttpFD that suppresses all screen/console-title output.

    Used internally to download individual fragments without each one
    spamming the progress display.
    """

    def to_screen(self, *args, **kargs):
        # Intentionally a no-op: fragment-level messages are reported by
        # the owning FragmentFD, not by the per-fragment downloader.
        pass

    to_console_title = to_screen
class FragmentFD(FileDownloader):
    """
    A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).

    Available options:

    fragment_retries:   Number of times to retry a fragment for HTTP error
                        (DASH and hlsnative only). Default is 0 for API, but 10 for CLI
    skip_unavailable_fragments:
                        Skip unavailable fragments (DASH and hlsnative only)
    keep_fragments:     Keep downloaded fragments on disk after downloading is
                        finished
    concurrent_fragment_downloads:  The number of threads to use for native hls and dash downloads
    _no_ytdl_file:      Don't use .ytdl file

    For each incomplete fragment download yt-dlp keeps on disk a special
    bookkeeping file with download state and metadata (in future such files will
    be used for any incomplete download handled by yt-dlp). This file is
    used to properly handle resuming, check download file consistency and detect
    potential errors. The file has a .ytdl extension and represents a standard
    JSON file of the following format:

    extractor:
        Dictionary of extractor related data. TBD.

    downloader:
        Dictionary of downloader related data. May contain following data:
            current_fragment:
                Dictionary with current (being downloaded) fragment data:
                index:  0-based index of current fragment among all fragments
            fragment_count:
                Total count of fragments

    This feature is experimental and file format may change in future.
    """
63 def report_retry_fragment(self
, err
, frag_index
, count
, retries
):
64 self
.deprecation_warning('yt_dlp.downloader.FragmentFD.report_retry_fragment is deprecated. '
65 'Use yt_dlp.downloader.FileDownloader.report_retry instead')
66 return self
.report_retry(err
, count
, retries
, frag_index
)
68 def report_skip_fragment(self
, frag_index
, err
=None):
69 err
= f
' {err};' if err
else ''
70 self
.to_screen(f
'[download]{err} Skipping fragment {frag_index:d} ...')
72 def _prepare_url(self
, info_dict
, url
):
73 headers
= info_dict
.get('http_headers')
74 return Request(url
, None, headers
) if headers
else url
    def _prepare_and_start_frag_download(self, ctx, info_dict):
        """Convenience wrapper: prepare the fragment download context, then start it."""
        self._prepare_frag_download(ctx)
        self._start_frag_download(ctx, info_dict)
80 def __do_ytdl_file(self
, ctx
):
81 return ctx
['live'] is not True and ctx
['tmpfilename'] != '-' and not self
.params
.get('_no_ytdl_file')
83 def _read_ytdl_file(self
, ctx
):
84 assert 'ytdl_corrupt' not in ctx
85 stream
, _
= self
.sanitize_open(self
.ytdl_filename(ctx
['filename']), 'r')
87 ytdl_data
= json
.loads(stream
.read())
88 ctx
['fragment_index'] = ytdl_data
['downloader']['current_fragment']['index']
89 if 'extra_state' in ytdl_data
['downloader']:
90 ctx
['extra_state'] = ytdl_data
['downloader']['extra_state']
92 ctx
['ytdl_corrupt'] = True
96 def _write_ytdl_file(self
, ctx
):
97 frag_index_stream
, _
= self
.sanitize_open(self
.ytdl_filename(ctx
['filename']), 'w')
100 'current_fragment': {
101 'index': ctx
['fragment_index'],
104 if 'extra_state' in ctx
:
105 downloader
['extra_state'] = ctx
['extra_state']
106 if ctx
.get('fragment_count') is not None:
107 downloader
['fragment_count'] = ctx
['fragment_count']
108 frag_index_stream
.write(json
.dumps({'downloader': downloader
}))
110 frag_index_stream
.close()
112 def _download_fragment(self
, ctx
, frag_url
, info_dict
, headers
=None, request_data
=None):
113 fragment_filename
= '%s-Frag%d' % (ctx
['tmpfilename'], ctx
['fragment_index'])
114 fragment_info_dict
= {
116 'http_headers': headers
or info_dict
.get('http_headers'),
117 'request_data': request_data
,
118 'ctx_id': ctx
.get('ctx_id'),
121 if ctx
['dl'].params
.get('continuedl', True):
122 frag_resume_len
= self
.filesize_or_none(self
.temp_name(fragment_filename
))
123 fragment_info_dict
['frag_resume_len'] = ctx
['frag_resume_len'] = frag_resume_len
125 success
, _
= ctx
['dl'].download(fragment_filename
, fragment_info_dict
)
128 if fragment_info_dict
.get('filetime'):
129 ctx
['fragment_filetime'] = fragment_info_dict
.get('filetime')
130 ctx
['fragment_filename_sanitized'] = fragment_filename
133 def _read_fragment(self
, ctx
):
134 if not ctx
.get('fragment_filename_sanitized'):
137 down
, frag_sanitized
= self
.sanitize_open(ctx
['fragment_filename_sanitized'], 'rb')
138 except FileNotFoundError
:
142 ctx
['fragment_filename_sanitized'] = frag_sanitized
143 frag_content
= down
.read()
147 def _append_fragment(self
, ctx
, frag_content
):
149 ctx
['dest_stream'].write(frag_content
)
150 ctx
['dest_stream'].flush()
152 if self
.__do
_ytdl
_file
(ctx
):
153 self
._write
_ytdl
_file
(ctx
)
154 if not self
.params
.get('keep_fragments', False):
155 self
.try_remove(encodeFilename(ctx
['fragment_filename_sanitized']))
156 del ctx
['fragment_filename_sanitized']
158 def _prepare_frag_download(self
, ctx
):
159 if not ctx
.setdefault('live', False):
160 total_frags_str
= '%d' % ctx
['total_frags']
161 ad_frags
= ctx
.get('ad_frags', 0)
163 total_frags_str
+= ' (not including %d ad)' % ad_frags
165 total_frags_str
= 'unknown (live)'
166 self
.to_screen(f
'[{self.FD_NAME}] Total fragments: {total_frags_str}')
167 self
.report_destination(ctx
['filename'])
168 dl
= HttpQuietDownloader(self
.ydl
, {
173 'max_sleep_interval': 0,
174 'sleep_interval_subtitles': 0,
176 tmpfilename
= self
.temp_name(ctx
['filename'])
179 # Establish possible resume length
180 resume_len
= self
.filesize_or_none(tmpfilename
)
184 # Should be initialized before ytdl file check
186 'tmpfilename': tmpfilename
,
190 if self
.__do
_ytdl
_file
(ctx
):
191 ytdl_file_exists
= os
.path
.isfile(encodeFilename(self
.ytdl_filename(ctx
['filename'])))
192 continuedl
= self
.params
.get('continuedl', True)
193 if continuedl
and ytdl_file_exists
:
194 self
._read
_ytdl
_file
(ctx
)
195 is_corrupt
= ctx
.get('ytdl_corrupt') is True
196 is_inconsistent
= ctx
['fragment_index'] > 0 and resume_len
== 0
197 if is_corrupt
or is_inconsistent
:
199 '.ytdl file is corrupt' if is_corrupt
else
200 'Inconsistent state of incomplete fragment download')
202 f
'{message}. Restarting from the beginning ...')
203 ctx
['fragment_index'] = resume_len
= 0
204 if 'ytdl_corrupt' in ctx
:
205 del ctx
['ytdl_corrupt']
206 self
._write
_ytdl
_file
(ctx
)
211 self
._read
_ytdl
_file
(ctx
)
212 ctx
['fragment_index'] = resume_len
= 0
213 self
._write
_ytdl
_file
(ctx
)
214 assert ctx
['fragment_index'] == 0
216 dest_stream
, tmpfilename
= self
.sanitize_open(tmpfilename
, open_mode
)
220 'dest_stream': dest_stream
,
221 'tmpfilename': tmpfilename
,
222 # Total complete fragments downloaded so far in bytes
223 'complete_frags_downloaded_bytes': resume_len
,
226 def _start_frag_download(self
, ctx
, info_dict
):
227 resume_len
= ctx
['complete_frags_downloaded_bytes']
228 total_frags
= ctx
['total_frags']
229 ctx_id
= ctx
.get('ctx_id')
230 # Stores the download progress, updated by the progress hook
232 'status': 'downloading',
233 'downloaded_bytes': resume_len
,
234 'fragment_index': ctx
['fragment_index'],
235 'fragment_count': total_frags
,
236 'filename': ctx
['filename'],
237 'tmpfilename': ctx
['tmpfilename'],
240 ctx
['started'] = time
.time()
241 progress
= ProgressCalculator(resume_len
)
243 def frag_progress_hook(s
):
244 if s
['status'] not in ('downloading', 'finished'):
247 if not total_frags
and ctx
.get('fragment_count'):
248 state
['fragment_count'] = ctx
['fragment_count']
250 if ctx_id
is not None and s
.get('ctx_id') != ctx_id
:
253 state
['max_progress'] = ctx
.get('max_progress')
254 state
['progress_idx'] = ctx
.get('progress_idx')
256 state
['elapsed'] = progress
.elapsed
257 frag_total_bytes
= s
.get('total_bytes') or 0
258 s
['fragment_info_dict'] = s
.pop('info_dict', {})
260 # XXX: Fragment resume is not accounted for here
263 (ctx
['complete_frags_downloaded_bytes'] + frag_total_bytes
)
264 / (state
['fragment_index'] + 1) * total_frags
)
265 progress
.total
= estimated_size
266 progress
.update(s
.get('downloaded_bytes'))
267 state
['total_bytes_estimate'] = progress
.total
269 progress
.update(s
.get('downloaded_bytes'))
271 if s
['status'] == 'finished':
272 state
['fragment_index'] += 1
273 ctx
['fragment_index'] = state
['fragment_index']
274 progress
.thread_reset()
276 state
['downloaded_bytes'] = ctx
['complete_frags_downloaded_bytes'] = progress
.downloaded
277 state
['speed'] = ctx
['speed'] = progress
.speed
.smooth
278 state
['eta'] = progress
.eta
.smooth
280 self
._hook
_progress
(state
, info_dict
)
282 ctx
['dl'].add_progress_hook(frag_progress_hook
)
284 return ctx
['started']
286 def _finish_frag_download(self
, ctx
, info_dict
):
287 ctx
['dest_stream'].close()
288 if self
.__do
_ytdl
_file
(ctx
):
289 self
.try_remove(self
.ytdl_filename(ctx
['filename']))
290 elapsed
= time
.time() - ctx
['started']
292 to_file
= ctx
['tmpfilename'] != '-'
294 downloaded_bytes
= self
.filesize_or_none(ctx
['tmpfilename'])
296 downloaded_bytes
= ctx
['complete_frags_downloaded_bytes']
298 if not downloaded_bytes
:
300 self
.try_remove(ctx
['tmpfilename'])
301 self
.report_error('The downloaded file is empty')
304 self
.try_rename(ctx
['tmpfilename'], ctx
['filename'])
305 filetime
= ctx
.get('fragment_filetime')
306 if self
.params
.get('updatetime', True) and filetime
:
307 with contextlib
.suppress(Exception):
308 os
.utime(ctx
['filename'], (time
.time(), filetime
))
310 self
._hook
_progress
({
311 'downloaded_bytes': downloaded_bytes
,
312 'total_bytes': downloaded_bytes
,
313 'filename': ctx
['filename'],
314 'status': 'finished',
316 'ctx_id': ctx
.get('ctx_id'),
317 'max_progress': ctx
.get('max_progress'),
318 'progress_idx': ctx
.get('progress_idx'),
322 def _prepare_external_frag_download(self
, ctx
):
323 if 'live' not in ctx
:
326 total_frags_str
= '%d' % ctx
['total_frags']
327 ad_frags
= ctx
.get('ad_frags', 0)
329 total_frags_str
+= ' (not including %d ad)' % ad_frags
331 total_frags_str
= 'unknown (live)'
332 self
.to_screen(f
'[{self.FD_NAME}] Total fragments: {total_frags_str}')
334 tmpfilename
= self
.temp_name(ctx
['filename'])
336 # Should be initialized before ytdl file check
338 'tmpfilename': tmpfilename
,
342 def decrypter(self
, info_dict
):
346 if url
not in _key_cache
:
347 _key_cache
[url
] = self
.ydl
.urlopen(self
._prepare
_url
(info_dict
, url
)).read()
348 return _key_cache
[url
]
350 def decrypt_fragment(fragment
, frag_content
):
351 if frag_content
is None:
353 decrypt_info
= fragment
.get('decrypt_info')
354 if not decrypt_info
or decrypt_info
['METHOD'] != 'AES-128':
356 iv
= decrypt_info
.get('IV') or struct
.pack('>8xq', fragment
['media_sequence'])
357 decrypt_info
['KEY'] = (decrypt_info
.get('KEY')
358 or _get_key(traverse_obj(info_dict
, ('hls_aes', 'uri')) or decrypt_info
['URI']))
359 # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
360 # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
361 # not what it decrypts to.
362 if self
.params
.get('test', False):
364 return unpad_pkcs7(aes_cbc_decrypt_bytes(frag_content
, decrypt_info
['KEY'], iv
))
366 return decrypt_fragment
368 def download_and_append_fragments_multiple(self
, *args
, **kwargs
):
370 @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
371 all args must be either tuple or list
373 interrupt_trigger
= [True]
374 max_progress
= len(args
)
375 if max_progress
== 1:
376 return self
.download_and_append_fragments(*args
[0], **kwargs
)
377 max_workers
= self
.params
.get('concurrent_fragment_downloads', 1)
379 self
._prepare
_multiline
_status
(max_progress
)
380 is_live
= any(traverse_obj(args
, (..., 2, 'is_live')))
382 def thread_func(idx
, ctx
, fragments
, info_dict
, tpe
):
383 ctx
['max_progress'] = max_progress
384 ctx
['progress_idx'] = idx
385 return self
.download_and_append_fragments(
386 ctx
, fragments
, info_dict
, **kwargs
, tpe
=tpe
, interrupt_trigger
=interrupt_trigger
)
388 class FTPE(concurrent
.futures
.ThreadPoolExecutor
):
389 # has to stop this or it's going to wait on the worker thread itself
390 def __exit__(self
, exc_type
, exc_val
, exc_tb
):
393 if compat_os_name
== 'nt':
394 def future_result(future
):
397 return future
.result(0.1)
398 except KeyboardInterrupt:
400 except concurrent
.futures
.TimeoutError
:
403 def future_result(future
):
404 return future
.result()
406 def interrupt_trigger_iter(fg
):
408 if not interrupt_trigger
[0]:
413 for idx
, (ctx
, fragments
, info_dict
) in enumerate(args
):
414 tpe
= FTPE(math
.ceil(max_workers
/ max_progress
))
415 job
= tpe
.submit(thread_func
, idx
, ctx
, interrupt_trigger_iter(fragments
), info_dict
, tpe
)
416 spins
.append((tpe
, job
))
419 for tpe
, job
in spins
:
421 result
= result
and future_result(job
)
422 except KeyboardInterrupt:
423 interrupt_trigger
[0] = False
425 tpe
.shutdown(wait
=True)
426 if not interrupt_trigger
[0] and not is_live
:
427 raise KeyboardInterrupt
428 # we expect the user wants to stop and DO WANT the preceding postprocessors to run;
429 # so returning a intermediate result here instead of KeyboardInterrupt on live
432 def download_and_append_fragments(
433 self
, ctx
, fragments
, info_dict
, *, is_fatal
=(lambda idx
: False),
434 pack_func
=(lambda content
, idx
: content
), finish_func
=None,
435 tpe
=None, interrupt_trigger
=(True, )):
437 if not self
.params
.get('skip_unavailable_fragments', True):
438 is_fatal
= lambda _
: True
440 def download_fragment(fragment
, ctx
):
441 if not interrupt_trigger
[0]:
444 frag_index
= ctx
['fragment_index'] = fragment
['frag_index']
445 ctx
['last_error'] = None
446 headers
= HTTPHeaderDict(info_dict
.get('http_headers'))
447 byte_range
= fragment
.get('byte_range')
449 headers
['Range'] = 'bytes=%d-%d' % (byte_range
['start'], byte_range
['end'] - 1)
451 # Never skip the first fragment
452 fatal
= is_fatal(fragment
.get('index') or (frag_index
- 1))
454 def error_callback(err
, count
, retries
):
455 if fatal
and count
> retries
:
456 ctx
['dest_stream'].close()
457 self
.report_retry(err
, count
, retries
, frag_index
, fatal
)
458 ctx
['last_error'] = err
460 for retry
in RetryManager(self
.params
.get('fragment_retries'), error_callback
):
462 ctx
['fragment_count'] = fragment
.get('fragment_count')
463 if not self
._download
_fragment
(
464 ctx
, fragment
['url'], info_dict
, headers
, info_dict
.get('request_data')):
466 except (HTTPError
, IncompleteRead
) as err
:
469 except DownloadError
: # has own retry settings
473 def append_fragment(frag_content
, frag_index
, ctx
):
475 self
._append
_fragment
(ctx
, pack_func(frag_content
, frag_index
))
476 elif not is_fatal(frag_index
- 1):
477 self
.report_skip_fragment(frag_index
, 'fragment not found')
479 ctx
['dest_stream'].close()
480 self
.report_error(f
'fragment {frag_index} not found, unable to continue')
484 decrypt_fragment
= self
.decrypter(info_dict
)
486 max_workers
= math
.ceil(
487 self
.params
.get('concurrent_fragment_downloads', 1) / ctx
.get('max_progress', 1))
489 def _download_fragment(fragment
):
490 ctx_copy
= ctx
.copy()
491 download_fragment(fragment
, ctx_copy
)
492 return fragment
, fragment
['frag_index'], ctx_copy
.get('fragment_filename_sanitized')
494 with tpe
or concurrent
.futures
.ThreadPoolExecutor(max_workers
) as pool
:
496 for fragment
, frag_index
, frag_filename
in pool
.map(_download_fragment
, fragments
):
498 'fragment_filename_sanitized': frag_filename
,
499 'fragment_index': frag_index
,
501 if not append_fragment(decrypt_fragment(fragment
, self
._read
_fragment
(ctx
)), frag_index
, ctx
):
503 except KeyboardInterrupt:
504 self
._finish
_multiline
_status
()
506 'Interrupted by user. Waiting for all threads to shutdown...', is_error
=False, tb
=False)
507 pool
.shutdown(wait
=False)
510 for fragment
in fragments
:
511 if not interrupt_trigger
[0]:
514 download_fragment(fragment
, ctx
)
515 result
= append_fragment(
516 decrypt_fragment(fragment
, self
._read
_fragment
(ctx
)), fragment
['frag_index'], ctx
)
517 except KeyboardInterrupt:
518 if info_dict
.get('is_live'):
524 if finish_func
is not None:
525 ctx
['dest_stream'].write(finish_func())
526 ctx
['dest_stream'].flush()
527 return self
._finish
_frag
_download
(ctx
, info_dict
)