yt_dlp/downloader/http.py (yt-dlp release 2024.12.13, blob 9c6dd8b)

import os
import random
import time

from .common import FileDownloader
from ..networking import Request
from ..networking.exceptions import (
    CertificateVerifyError,
    HTTPError,
    TransportError,
)
from ..utils import (
    ContentTooShortError,
    RetryManager,
    ThrottledDownload,
    XAttrMetadataError,
    XAttrUnavailableError,
    int_or_none,
    parse_http_range,
    try_call,
    write_xattr,
)
from ..utils.networking import HTTPHeaderDict


class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        request_data = info_dict.get('request_data', None)

        # Plain dict with attribute-style access, used to share download state
        # between the nested helper functions below
        class DownloadContext(dict):
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__

        ctx = DownloadContext()
        ctx.filename = filename
        ctx.tmpfilename = self.temp_name(filename)
        ctx.stream = None

        # Disable compression
        headers = HTTPHeaderDict({'Accept-Encoding': 'identity'}, info_dict.get('http_headers'))

        is_test = self.params.get('test', False)
        chunk_size = self._TEST_FILE_SIZE if is_test else (
            self.params.get('http_chunk_size')
            or info_dict.get('downloader_options', {}).get('http_chunk_size')
            or 0)

        ctx.open_mode = 'wb'
        ctx.resume_len = 0
        ctx.block_size = self.params.get('buffersize', 1024)
        ctx.start_time = time.time()

        # parse given Range
        req_start, req_end, _ = parse_http_range(headers.get('Range'))

        if self.params.get('continuedl', True):
            # Establish possible resume length
            if os.path.isfile(ctx.tmpfilename):
                ctx.resume_len = os.path.getsize(ctx.tmpfilename)

        ctx.is_resume = ctx.resume_len > 0

        class SucceedDownload(Exception):
            pass

        class RetryDownload(Exception):
            def __init__(self, source_error):
                self.source_error = source_error

        class NextFragment(Exception):
            pass

        def establish_connection():
            ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
                              if not is_test and chunk_size else chunk_size)
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if req_start is not None:
                    # offset the beginning of Range to be within request
                    range_start += req_start
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = 'ab'
            elif req_start is not None:
                range_start = req_start
            elif ctx.chunk_size > 0:
                range_start = 0
            else:
                range_start = None
            ctx.is_resume = False

            if ctx.chunk_size:
                chunk_aware_end = range_start + ctx.chunk_size - 1
                # we're not allowed to download outside Range
                range_end = chunk_aware_end if req_end is None else min(chunk_aware_end, req_end)
            elif req_end is not None:
                # there's no need for chunked downloads, so download until the end of Range
                range_end = req_end
            else:
                range_end = None

            if try_call(lambda: range_start > range_end):
                ctx.resume_len = 0
                ctx.open_mode = 'wb'
                raise RetryDownload(Exception(f'Conflicting range. (start={range_start} > end={range_end})'))

            if try_call(lambda: range_end >= ctx.content_len):
                range_end = ctx.content_len - 1

            request = Request(url, request_data, headers)
            has_range = range_start is not None
            if has_range:
                request.headers['Range'] = f'bytes={int(range_start)}-{int_or_none(range_end) or ""}'
            # Establish connection
            try:
                ctx.data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range HTTP header of the response has to be
                # checked to match the value of the requested Range HTTP header. This is due to
                # webservers that don't support resuming and serve the whole file with no
                # Content-Range set in the response despite the requested Range (see
                # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
                if has_range:
                    content_range = ctx.data.headers.get('Content-Range')
                    content_range_start, content_range_end, content_len = parse_http_range(content_range)
                    # Content-Range is present and matches requested Range, resume is possible
                    if range_start == content_range_start and (
                            # Non-chunked download
                            not ctx.chunk_size
                            # Chunked download and requested piece or
                            # its part is promised to be served
                            or content_range_end == range_end
                            or content_len < range_end):
                        ctx.content_len = content_len
                        if content_len or req_end:
                            ctx.data_len = min(content_len or req_end, req_end or content_len) - (req_start or 0)
                        return
                    # Content-Range is either not present or invalid. Assuming the remote webserver is
                    # trying to send the whole file, resume is not possible, so wipe the local file
                    # and perform an entire redownload
                    elif range_start > 0:
                        self.report_unable_to_resume()
                        ctx.resume_len = 0
                        ctx.open_mode = 'wb'
                ctx.data_len = ctx.content_len = int_or_none(ctx.data.headers.get('Content-length', None))
            except HTTPError as err:
                if err.status == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        ctx.data = self.ydl.urlopen(
                            Request(url, request_data, headers))
                        content_length = ctx.data.headers['Content-Length']
                    except HTTPError as err:
                        if err.status < 500 or err.status >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None
                                and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs by less than 100 bytes from
                            # the one on the hard drive.
                            self.report_file_already_downloaded(ctx.filename)
                            self.try_rename(ctx.tmpfilename, ctx.filename)
                            self._hook_progress({
                                'filename': ctx.filename,
                                'status': 'finished',
                                'downloaded_bytes': ctx.resume_len,
                                'total_bytes': ctx.resume_len,
                            }, info_dict)
                            raise SucceedDownload
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            ctx.resume_len = 0
                            ctx.open_mode = 'wb'
                            return
                elif err.status < 500 or err.status >= 600:
                    # Unexpected HTTP error
                    raise
                raise RetryDownload(err)
            except CertificateVerifyError:
                raise
            except TransportError as err:
                raise RetryDownload(err)

        def close_stream():
            if ctx.stream is not None:
                if ctx.tmpfilename != '-':
                    ctx.stream.close()
                ctx.stream = None

        def download():
            data_len = ctx.data.headers.get('Content-length')

            if ctx.data.headers.get('Content-encoding'):
                # Content-encoding is present, Content-length is not reliable anymore as we are
                # doing auto decompression. (See: https://github.com/yt-dlp/yt-dlp/pull/6176)
                data_len = None

            # Range HTTP header may be ignored/unsupported by a webserver
            # (e.g. extractor/scivee.py, extractor/bambuser.py).
            # However, for a test we still would like to download just a piece of a file.
            # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
            # block size when downloading a file.
            if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
                data_len = self._TEST_FILE_SIZE

            if data_len is not None:
                data_len = int(data_len) + ctx.resume_len
                min_data_len = self.params.get('min_filesize')
                max_data_len = self.params.get('max_filesize')
                if min_data_len is not None and data_len < min_data_len:
                    self.to_screen(
                        f'\r[download] File is smaller than min-filesize ({data_len} bytes < {min_data_len} bytes). Aborting.')
                    return False
                if max_data_len is not None and data_len > max_data_len:
                    self.to_screen(
                        f'\r[download] File is larger than max-filesize ({data_len} bytes > {max_data_len} bytes). Aborting.')
                    return False

            byte_counter = 0 + ctx.resume_len
            block_size = ctx.block_size
            start = time.time()

            # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
            now = None  # needed for slow_down() in the first loop run
            before = start  # start measuring

            def retry(e):
                close_stream()
                if ctx.tmpfilename == '-':
                    ctx.resume_len = byte_counter
                else:
                    try:
                        ctx.resume_len = os.path.getsize(ctx.tmpfilename)
                    except FileNotFoundError:
                        ctx.resume_len = 0
                raise RetryDownload(e)

            while True:
                try:
                    # Download and write
                    data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
                except TransportError as err:
                    retry(err)

                byte_counter += len(data_block)

                # exit loop when download is finished
                if len(data_block) == 0:
                    break

                # Open destination file just in time
                if ctx.stream is None:
                    try:
                        ctx.stream, ctx.tmpfilename = self.sanitize_open(
                            ctx.tmpfilename, ctx.open_mode)
                        assert ctx.stream is not None
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except OSError as err:
                        self.report_error(f'unable to open for writing: {err}')
                        return False

                    if self.params.get('xattr_set_filesize', False) and data_len is not None:
                        try:
                            write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
                        except (XAttrUnavailableError, XAttrMetadataError) as err:
                            self.report_error(f'unable to set filesize xattr: {err}')

                try:
                    ctx.stream.write(data_block)
                except OSError as err:
                    self.to_stderr('\n')
                    self.report_error(f'unable to write data: {err}')
                    return False

                # Apply rate limit
                self.slow_down(start, now, byte_counter - ctx.resume_len)

                # end measuring of one loop run
                now = time.time()
                after = now

                # Adjust block size
                if not self.params.get('noresizebuffer', False):
                    block_size = self.best_block_size(after - before, len(data_block))

                before = after

                # Progress message
                speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
                if ctx.data_len is None:
                    eta = None
                else:
                    eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)

                self._hook_progress({
                    'status': 'downloading',
                    'downloaded_bytes': byte_counter,
                    'total_bytes': ctx.data_len,
                    'tmpfilename': ctx.tmpfilename,
                    'filename': ctx.filename,
                    'eta': eta,
                    'speed': speed,
                    'elapsed': now - ctx.start_time,
                    'ctx_id': info_dict.get('ctx_id'),
                }, info_dict)

                if data_len is not None and byte_counter == data_len:
                    break

                if speed and speed < (self.params.get('throttledratelimit') or 0):
                    # The speed must stay below the limit for 3 seconds
                    # This prevents raising error when the speed temporarily goes down
                    if ctx.throttle_start is None:
                        ctx.throttle_start = now
                    elif now - ctx.throttle_start > 3:
                        if ctx.stream is not None and ctx.tmpfilename != '-':
                            ctx.stream.close()
                        raise ThrottledDownload
                elif speed:
                    ctx.throttle_start = None

            if ctx.stream is None:
                self.to_stderr('\n')
                self.report_error('Did not get any data blocks')
                return False

            if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
                ctx.resume_len = byte_counter
                raise NextFragment

            if ctx.tmpfilename != '-':
                ctx.stream.close()

            if data_len is not None and byte_counter != data_len:
                err = ContentTooShortError(byte_counter, int(data_len))
                retry(err)

            self.try_rename(ctx.tmpfilename, ctx.filename)

            # Update file modification time
            if self.params.get('updatetime', True):
                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.headers.get('last-modified', None))

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': byte_counter,
                'filename': ctx.filename,
                'status': 'finished',
                'elapsed': time.time() - ctx.start_time,
                'ctx_id': info_dict.get('ctx_id'),
            }, info_dict)

            return True

        for retry in RetryManager(self.params.get('retries'), self.report_retry):
            try:
                establish_connection()
                return download()
            except RetryDownload as err:
                retry.error = err.source_error
                continue
            except NextFragment:
                # Moving on to the next chunk of a chunked download is not an error,
                # so it must not consume one of the allowed retries
                retry.error = None
                retry.attempt -= 1
                continue
            except SucceedDownload:
                return True
            except:  # noqa: E722
                close_stream()
                raise
        return False