Fix the tag.
[python/dscho.git] / Lib / urllib2.py
blobf87d364f59a2dadf287891cb7a8936893b32a5c0
1 """An extensible library for opening URLs using a variety of protocols
3 The simplest way to use this module is to call the urlopen function,
4 which accepts a string containing a URL or a Request object (described
5 below). It opens the URL and returns the results as file-like
6 object; the returned object has some extra methods described below.
8 The OpenerDirector manages a collection of Handler objects that do
9 all the actual work. Each Handler implements a particular protocol or
10 option. The OpenerDirector is a composite object that invokes the
11 Handlers needed to open the requested URL. For example, the
12 HTTPHandler performs HTTP GET and POST requests and deals with
13 non-error returns. The HTTPRedirectHandler automatically deals with
14 HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
15 deals with digest authentication.
17 urlopen(url, data=None) -- Basic usage is the same as original
18 urllib.  Pass the url and optionally data to post to an HTTP URL, and
19 get a file-like object back. One difference is that you can also pass
20 a Request instance instead of URL. Raises a URLError (subclass of
21 IOError); for HTTP errors, raises an HTTPError, which can also be
22 treated as a valid response.
24 build_opener -- Function that creates a new OpenerDirector instance.
25 Will install the default handlers. Accepts one or more Handlers as
26 arguments, either instances or Handler classes that it will
27 instantiate.  If one of the arguments is a subclass of the default
28 handler, the argument will be installed instead of the default.
30 install_opener -- Installs a new opener as the default opener.
32 objects of interest:
33 OpenerDirector --
35 Request -- An object that encapsulates the state of a request. The
36 state can be as simple as the URL. It can also include extra HTTP
37 headers, e.g. a User-Agent.
39 BaseHandler --
41 exceptions:
42 URLError -- A subclass of IOError, individual protocols have their own
43 specific subclass.
45 HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
46 as an exceptional event or valid response.
48 internals:
49 BaseHandler and parent
50 _call_chain conventions
52 Example usage:
54 import urllib2
56 # set up authentication info
57 authinfo = urllib2.HTTPBasicAuthHandler()
58 authinfo.add_password(realm='PDQ Application',
59 uri='https://mahler:8092/site-updates.py',
60 user='klem',
61 passwd='geheim$parole')
63 proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
65 # build a new opener that adds authentication and caching FTP handlers
66 opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
68 # install it
69 urllib2.install_opener(opener)
71 f = urllib2.urlopen('http://www.python.org/')
74 """
76 # XXX issues:
77 # If an authentication error handler that tries to perform
78 # authentication for some reason but fails, how should the error be
79 # signalled? The client needs to know the HTTP error code. But if
80 # the handler knows what the problem was, e.g., that it didn't know
81 # the hash algorithm that was requested in the challenge, it would be good to
82 # pass that information along to the client, too.
83 # ftp errors aren't handled cleanly
84 # check digest against correct (i.e. non-apache) implementation
86 # Possible extensions:
87 # complex proxies XXX not sure what exactly was meant by this
88 # abstract factory for opener
90 import base64
91 import hashlib
92 import httplib
93 import io
94 import mimetools
95 import os
96 import posixpath
97 import random
98 import re
99 import socket
100 import sys
101 import time
102 import urlparse
103 import bisect
105 from io import StringIO
107 from urllib import (unwrap, unquote, splittype, splithost, quote,
108 addinfourl, splitport, splitquery,
109 splitattr, ftpwrapper, noheaders, splituser, splitpasswd, splitvalue)
111 # support for FileHandler, proxies via environment variables
112 from urllib import localhost, url2pathname, getproxies
114 # used in User-Agent header sent
115 __version__ = sys.version[:3]
# Module-wide opener shared by urlopen(); created lazily on first use and
# replaceable via install_opener().
_opener = None

def urlopen(url, data=None, timeout=None):
    """Open *url* (a string or Request object) using the module-wide opener.

    The opener is built on demand with build_opener() and cached in the
    module-global ``_opener``.  *data* and *timeout* are forwarded to
    OpenerDirector.open() unchanged.
    """
    global _opener
    opener = _opener
    if opener is None:
        opener = _opener = build_opener()
    return opener.open(url, data, timeout)
def install_opener(opener):
    """Install *opener* as the module-wide default used by urlopen()."""
    global _opener
    _opener = opener
128 # do these error classes make sense?
129 # make sure all of the IOError stuff is overridden. we just want to be
130 # subtypes.
class URLError(IOError):
    """Base exception for this module's URL-handling errors.

    Subtypes IOError for backward compatibility, but shares none of its
    implementation.  ``args`` is set only so expectations of other
    EnvironmentError subclasses are roughly met; it holds ``(reason,)``
    rather than the usual ``(errno, strerror)`` pair.
    """

    def __init__(self, reason):
        self.reason = reason
        self.args = (reason,)

    def __str__(self):
        return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
    """Raised when HTTP error occurs, but also acts like non-error return.

    Because it also subclasses addinfourl it can be treated as a valid,
    file-like HTTP response (read(), info(), geturl()) as well as an
    exception carrying ``code``, ``msg`` and ``hdrs``.
    """
    # Bound at class-creation time so __init__ can run addinfourl's
    # initializer without a multiple-inheritance super() call.
    __super_init = addinfourl.__init__

    def __init__(self, url, code, msg, hdrs, fp):
        self.code = code
        self.msg = msg
        self.hdrs = hdrs
        self.fp = fp
        self.filename = url
        # The addinfourl classes depend on fp being a valid file
        # object.  In some cases, the HTTPError may not have a valid
        # file object.  If this happens, the simplest workaround is to
        # not initialize the base classes.
        if fp is not None:
            self.__super_init(fp, hdrs, url, code)

    def __str__(self):
        return 'HTTP Error %s: %s' % (self.code, self.msg)
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")

def request_host(request):
    """Return request-host, as defined by RFC 2965.

    Variation from RFC: returned value is lowercased, for convenient
    comparison.
    """
    url = request.get_full_url()
    # Network-location component of the URL; may be empty (e.g. for a
    # schemeless/relative URL), in which case fall back to the Host header.
    host = urlparse.urlparse(url)[1]
    if host == "":
        host = request.get_header("Host", "")

    # remove port, if present
    host = _cut_port_re.sub("", host, 1)
    return host.lower()
class Request:
    """Encapsulates the state of a single request: URL, optional POST
    data, headers, and (presumably for cookie handling, per RFC 2965 --
    see request_host) the origin host / unverifiable flags.
    """

    def __init__(self, url, data=None, headers={},
                 origin_req_host=None, unverifiable=False):
        # NOTE: the {} default is shared between calls but is only read
        # (items() below), never mutated, so it is safe here.
        # unwrap('<URL:type://host/path>') --> 'type://host/path'
        self.__original = unwrap(url)
        self.type = None
        # self.__r_type is what's left after doing the splittype
        self.host = None
        self.port = None
        self.data = data
        self.headers = {}
        for key, value in headers.items():
            self.add_header(key, value)
        self.unredirected_hdrs = {}
        if origin_req_host is None:
            origin_req_host = request_host(self)
        self.origin_req_host = origin_req_host
        self.unverifiable = unverifiable

    def __getattr__(self, attr):
        # Lazily computes the mangled _Request__r_* attributes by calling
        # the matching get_* accessor (which sets them as a side effect).
        # XXX this is a fallback mechanism to guard against these
        # methods getting called in a non-standard order.  this may be
        # too complicated and/or unnecessary.
        # XXX should the __r_XXX attributes be public?
        if attr[:12] == '_Request__r_':
            name = attr[12:]
            if hasattr(Request, 'get_' + name):
                getattr(self, 'get_' + name)()
                return getattr(self, attr)
        raise AttributeError(attr)

    def get_method(self):
        # Method is implied by the presence of data: POST iff data given.
        if self.has_data():
            return "POST"
        else:
            return "GET"

    # XXX these helper methods are lame

    def add_data(self, data):
        self.data = data

    def has_data(self):
        return self.data is not None

    def get_data(self):
        return self.data

    def get_full_url(self):
        return self.__original

    def get_type(self):
        # Scheme of the URL, computed (and cached) on first access.
        if self.type is None:
            self.type, self.__r_type = splittype(self.__original)
            if self.type is None:
                raise ValueError("unknown url type: %s" % self.__original)
        return self.type

    def get_host(self):
        # Host portion, computed (and cached) on first access.
        if self.host is None:
            self.host, self.__r_host = splithost(self.__r_type)
            if self.host:
                self.host = unquote(self.host)
        return self.host

    def get_selector(self):
        # Path (plus query etc.) sent in the request line; after
        # set_proxy() this becomes the full original URL.
        return self.__r_host

    def set_proxy(self, host, type):
        # Route the request through a proxy: the proxy becomes the host,
        # and the full URL is sent as the selector.
        self.host, self.type = host, type
        self.__r_host = self.__original

    def get_origin_req_host(self):
        return self.origin_req_host

    def is_unverifiable(self):
        return self.unverifiable

    def add_header(self, key, val):
        # useful for something like authentication
        self.headers[key.capitalize()] = val

    def add_unredirected_header(self, key, val):
        # will not be added to a redirected request
        self.unredirected_hdrs[key.capitalize()] = val

    def has_header(self, header_name):
        return (header_name in self.headers or
                header_name in self.unredirected_hdrs)

    def get_header(self, header_name, default=None):
        return self.headers.get(
            header_name,
            self.unredirected_hdrs.get(header_name, default))

    def header_items(self):
        # Normal headers take precedence over unredirected ones.
        hdrs = self.unredirected_hdrs.copy()
        hdrs.update(self.headers)
        return list(hdrs.items())
class OpenerDirector:
    """Composite that dispatches a request through registered Handlers.

    Handlers are discovered by naming convention: a method named
    ``<protocol>_open`` / ``<protocol>_request`` / ``<protocol>_response``
    or ``<protocol>_error_<code>`` registers the handler in the matching
    dispatch table (see add_handler).
    """

    def __init__(self):
        client_version = "Python-urllib/%s" % __version__
        self.addheaders = [('User-agent', client_version)]
        # manage the individual handlers
        self.handlers = []
        self.handle_open = {}
        self.handle_error = {}
        self.process_response = {}
        self.process_request = {}

    def add_handler(self, handler):
        """Register *handler* based on the special method names it defines."""
        if not hasattr(handler, "add_parent"):
            raise TypeError("expected BaseHandler instance, got %r" %
                            type(handler))

        added = False
        for meth in dir(handler):
            if meth in ["redirect_request", "do_open", "proxy_open"]:
                # oops, coincidental match
                continue

            # Split "<protocol>_<condition>" at the first underscore.
            i = meth.find("_")
            protocol = meth[:i]
            condition = meth[i+1:]

            if condition.startswith("error"):
                # "<protocol>_error_<kind>"; kind is an int status code
                # where possible (e.g. http_error_404), else a string.
                j = condition.find("_") + i + 1
                kind = meth[j+1:]
                try:
                    kind = int(kind)
                except ValueError:
                    pass
                lookup = self.handle_error.get(protocol, {})
                self.handle_error[protocol] = lookup
            elif condition == "open":
                kind = protocol
                lookup = self.handle_open
            elif condition == "response":
                kind = protocol
                lookup = self.process_response
            elif condition == "request":
                kind = protocol
                lookup = self.process_request
            else:
                continue

            # Keep each chain sorted by handler_order (via Handler.__lt__).
            handlers = lookup.setdefault(kind, [])
            if handlers:
                bisect.insort(handlers, handler)
            else:
                handlers.append(handler)
            added = True

        if added:
            # the handlers must work in a specific order, the order
            # is specified in a Handler attribute
            bisect.insort(self.handlers, handler)
            handler.add_parent(self)

    def close(self):
        # Only exists for backwards compatibility.
        pass

    def _call_chain(self, chain, kind, meth_name, *args):
        # Handlers raise an exception if no one else should try to handle
        # the request, or return None if they can't but another handler
        # could.  Otherwise, they return the response.
        handlers = chain.get(kind, ())
        for handler in handlers:
            func = getattr(handler, meth_name)

            result = func(*args)
            if result is not None:
                return result

    def open(self, fullurl, data=None, timeout=None):
        """Open *fullurl* (URL string or Request), returning a response.

        Runs the protocol's request pre-processors, opens via _open(),
        then runs the response post-processors.
        """
        # accept a URL or a Request object
        if isinstance(fullurl, str):
            req = Request(fullurl, data)
        else:
            req = fullurl
            if data is not None:
                req.add_data(data)

        req.timeout = timeout
        protocol = req.get_type()

        # pre-process request
        meth_name = protocol+"_request"
        for processor in self.process_request.get(protocol, []):
            meth = getattr(processor, meth_name)
            req = meth(req)

        response = self._open(req, data)

        # post-process response
        meth_name = protocol+"_response"
        for processor in self.process_response.get(protocol, []):
            meth = getattr(processor, meth_name)
            response = meth(req, response)

        return response

    def _open(self, req, data=None):
        # Try default_open handlers first, then protocol-specific ones,
        # then unknown_open as a last resort.
        result = self._call_chain(self.handle_open, 'default',
                                  'default_open', req)
        if result:
            return result

        protocol = req.get_type()
        result = self._call_chain(self.handle_open, protocol, protocol +
                                  '_open', req)
        if result:
            return result

        return self._call_chain(self.handle_open, 'unknown',
                                'unknown_open', req)

    def error(self, proto, *args):
        """Dispatch an error to the registered error handlers for *proto*."""
        if proto in ('http', 'https'):
            # XXX http[s] protocols are special-cased
            # (note: 'dict' shadows the builtin here; kept for history)
            dict = self.handle_error['http'] # https is not different than http
            proto = args[2]  # YUCK!  args is (req, fp, code, msg, hdrs)
            meth_name = 'http_error_%s' % proto
            http_err = 1
            orig_args = args
        else:
            dict = self.handle_error
            meth_name = proto + '_error'
            http_err = 0
        args = (dict, proto, meth_name) + args
        result = self._call_chain(*args)
        if result:
            return result

        if http_err:
            # Nothing handled the specific status code; fall back to
            # the catch-all http_error_default chain.
            args = (dict, 'default', 'http_error_default') + orig_args
            return self._call_chain(*args)
424 # XXX probably also want an abstract factory that knows when it makes
425 # sense to skip a superclass in favor of a subclass and when it might
426 # make sense to include both
def build_opener(*handlers):
    """Create an opener object from a list of handlers.

    The opener will use several default handlers, including support
    for HTTP and FTP.

    If any of the handlers passed as arguments are subclasses of the
    default handlers, the default handlers will not be used.
    """
    def isclass(obj):
        # True for classes, including odd class-like objects exposing
        # __bases__ without being instances of type.
        return isinstance(obj, type) or hasattr(obj, "__bases__")

    opener = OpenerDirector()
    default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
                       HTTPDefaultErrorHandler, HTTPRedirectHandler,
                       FTPHandler, FileHandler, HTTPErrorProcessor]
    if hasattr(httplib, 'HTTPS'):
        # HTTPS support exists only when Python was built with SSL.
        default_classes.append(HTTPSHandler)
    skip = set()
    # Any user handler that subclasses (or is an instance of) a default
    # handler displaces that default.
    for klass in default_classes:
        for check in handlers:
            if isclass(check):
                if issubclass(check, klass):
                    skip.add(klass)
            elif isinstance(check, klass):
                skip.add(klass)
    for klass in skip:
        default_classes.remove(klass)

    for klass in default_classes:
        opener.add_handler(klass())

    # Install the user handlers, instantiating any bare classes.
    for h in handlers:
        if isclass(h):
            h = h()
        opener.add_handler(h)
    return opener
class BaseHandler:
    """Common base for protocol handlers managed by an OpenerDirector."""

    # Default sort key; lower values run earlier in each handler chain.
    handler_order = 500

    def add_parent(self, parent):
        """Record the OpenerDirector this handler was registered with."""
        self.parent = parent

    def close(self):
        """Do nothing; kept only for backwards compatibility."""

    def __lt__(self, other):
        # Objects that predate handler_order sort after everything else,
        # preserving the old behavior of custom user classes (which are
        # not aware of handler_order) being inserted after default ones.
        missing = object()
        other_order = getattr(other, "handler_order", missing)
        if other_order is missing:
            return True
        return self.handler_order < other_order
class HTTPErrorProcessor(BaseHandler):
    """Route non-2xx HTTP responses through the opener's error machinery."""

    # Run after every other response processor.
    handler_order = 1000

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        # RFC 2616: a "2xx" status means the request was successfully
        # received, understood, and accepted -- pass it through untouched.
        if 200 <= code < 300:
            return response
        return self.parent.error('http', request, response, code, msg, hdrs)

    https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
    """Last-resort handler: any unhandled HTTP error becomes an HTTPError."""

    def http_error_default(self, req, fp, code, msg, hdrs):
        raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
    """Follow HTTP 301/302/303/307 redirects, with loop detection."""

    # maximum number of redirections to any single URL
    # this is needed because of the state that cookies introduce
    max_repeats = 4
    # maximum total number of redirections (regardless of URL) before
    # assuming we're in a loop
    max_redirections = 10

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
            or code in (301, 302, 303) and m == "POST"):
            # Strictly (according to RFC 2616), 301 or 302 in response
            # to a POST MUST NOT cause a redirection without confirmation
            # from the user (of urllib2, in this case).  In practice,
            # essentially all clients do redirect in this case, so we
            # do the same.
            # be conciliant with URIs containing a space
            newurl = newurl.replace(' ', '%20')
            # Drop entity-body headers: the redirected request carries
            # no body.  (Fix: the closing parenthesis of this dict()
            # call was missing, leaving the module unparsable.)
            newheaders = dict((k, v) for k, v in req.headers.items()
                              if k.lower() not in ("content-length",
                                                   "content-type"))
            return Request(newurl,
                           headers=newheaders,
                           origin_req_host=req.get_origin_req_host(),
                           unverifiable=True)
        else:
            raise HTTPError(req.get_full_url(), code, msg, headers, fp)

    # Implementation note: To avoid the server sending us into an
    # infinite loop, the request object needs to track what URLs we
    # have already seen.  Do this by adding a handler-specific
    # attribute to the Request object.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI).  Use first header.
        if 'location' in headers:
            newurl = headers.getheaders('location')[0]
        elif 'uri' in headers:
            newurl = headers.getheaders('uri')[0]
        else:
            return
        newurl = urlparse.urljoin(req.get_full_url(), newurl)

        # XXX Probably want to forget about the state of the current
        # request, although that might interact poorly with other
        # handlers that also use handler-specific request attributes
        new = self.redirect_request(req, fp, code, msg, headers, newurl)
        if new is None:
            return

        # loop detection
        # .redirect_dict has a key url if url was previously visited.
        if hasattr(req, 'redirect_dict'):
            visited = new.redirect_dict = req.redirect_dict
            if (visited.get(newurl, 0) >= self.max_repeats or
                len(visited) >= self.max_redirections):
                raise HTTPError(req.get_full_url(), code,
                                self.inf_msg + msg, headers, fp)
        else:
            visited = new.redirect_dict = req.redirect_dict = {}
        visited[newurl] = visited.get(newurl, 0) + 1

        # Don't close the fp until we are sure that we won't use it
        # with HTTPError.
        fp.read()
        fp.close()

        return self.parent.open(new)

    http_error_301 = http_error_303 = http_error_307 = http_error_302

    inf_msg = "The HTTP server returned a redirect error that would " \
              "lead to an infinite loop.\n" \
              "The last 30x error message was:\n"
def _parse_proxy(proxy):
    """Return (scheme, user, password, host/port) given a URL or an authority.

    If a URL is supplied, it must have an authority (host:port) component.
    According to RFC 3986, having an authority component means the URL must
    have two slashes after the scheme:

    >>> _parse_proxy('file:/ftp.example.com/')
    Traceback (most recent call last):
    ValueError: proxy URL with no authority: 'file:/ftp.example.com/'

    The first three items of the returned tuple may be None.

    Examples of authority parsing:

    >>> _parse_proxy('proxy.example.com')
    (None, None, None, 'proxy.example.com')
    >>> _parse_proxy('proxy.example.com:3128')
    (None, None, None, 'proxy.example.com:3128')

    The authority component may optionally include userinfo (assumed to be
    username:password):

    >>> _parse_proxy('joe:password@proxy.example.com')
    (None, 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('joe:password@proxy.example.com:3128')
    (None, 'joe', 'password', 'proxy.example.com:3128')

    Same examples, but with URLs instead:

    >>> _parse_proxy('http://proxy.example.com/')
    ('http', None, None, 'proxy.example.com')
    >>> _parse_proxy('http://proxy.example.com:3128/')
    ('http', None, None, 'proxy.example.com:3128')
    >>> _parse_proxy('http://joe:password@proxy.example.com/')
    ('http', 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
    ('http', 'joe', 'password', 'proxy.example.com:3128')

    Everything after the authority is ignored:

    >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
    ('ftp', 'joe', 'password', 'proxy.example.com')

    Test for no trailing '/' case:

    >>> _parse_proxy('http://joe:password@proxy.example.com')
    ('http', 'joe', 'password', 'proxy.example.com')
    """
    scheme, r_scheme = splittype(proxy)
    if not r_scheme.startswith("/"):
        # authority
        scheme = None
        authority = proxy
    else:
        # URL
        if not r_scheme.startswith("//"):
            raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
        # and 3.3.), path is empty or starts with '/'
        end = r_scheme.find("/", 2)
        if end == -1:
            end = None
        authority = r_scheme[2:end]
    userinfo, hostport = splituser(authority)
    if userinfo is not None:
        user, password = splitpasswd(userinfo)
    else:
        user = password = None
    return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
    """Reroute requests through the proxies given in a {scheme: url} map."""

    # Proxies must be in front
    handler_order = 100

    def __init__(self, proxies=None):
        if proxies is None:
            proxies = getproxies()
        assert hasattr(proxies, 'keys'), "proxies must be a mapping"
        self.proxies = proxies
        for type, url in proxies.items():
            # Dynamically grow one "<scheme>_open" method per configured
            # proxy; default arguments bind the current url/type so each
            # lambda keeps its own values (avoids late-binding closures).
            setattr(self, '%s_open' % type,
                    lambda r, proxy=url, type=type, meth=self.proxy_open: \
                    meth(r, proxy, type))

    def proxy_open(self, req, proxy, type):
        orig_type = req.get_type()
        proxy_type, user, password, hostport = _parse_proxy(proxy)
        if proxy_type is None:
            # Proxy given as a bare authority: assume the request's scheme.
            proxy_type = orig_type
        if user and password:
            user_pass = '%s:%s' % (unquote(user), unquote(password))
            creds = base64.b64encode(user_pass.encode()).decode("ascii")
            req.add_header('Proxy-authorization', 'Basic ' + creds)
        hostport = unquote(hostport)
        req.set_proxy(hostport, proxy_type)
        if orig_type == proxy_type:
            # let other handlers take care of it
            return None
        else:
            # need to start over, because the other handlers don't
            # grok the proxy's URL type
            # e.g. if we have a constructor arg proxies like so:
            # {'http': 'ftp://proxy.example.com'}, we may end up turning
            # a request for http://acme.example.com/a into one for
            # ftp://proxy.example.com/a
            return self.parent.open(req)
class HTTPPasswordMgr:
    """Store (user, password) pairs keyed by (realm, reduced URIs)."""

    def __init__(self):
        # Maps realm -> {tuple of reduced URIs -> (user, passwd)}.
        self.passwd = {}

    def add_password(self, realm, uri, user, passwd):
        # uri could be a single URI or a sequence
        if isinstance(uri, str):
            uri = [uri]
        if not realm in self.passwd:
            self.passwd[realm] = {}
        # Store under both the default-port-normalized and raw forms so
        # find_user_password can match either spelling.
        for default_port in True, False:
            reduced_uri = tuple(
                [self.reduce_uri(u, default_port) for u in uri])
            self.passwd[realm][reduced_uri] = (user, passwd)

    def find_user_password(self, realm, authuri):
        """Return (user, passwd) for *authuri* in *realm*, or (None, None)."""
        domains = self.passwd.get(realm, {})
        for default_port in True, False:
            reduced_authuri = self.reduce_uri(authuri, default_port)
            for uris, authinfo in domains.items():
                for uri in uris:
                    if self.is_suburi(uri, reduced_authuri):
                        return authinfo
        return None, None

    def reduce_uri(self, uri, default_port=True):
        """Accept authority or URI and extract only the authority and path."""
        # note HTTP URLs do not have a userinfo component
        parts = urlparse.urlsplit(uri)
        if parts[1]:
            # URI
            scheme = parts[0]
            authority = parts[1]
            path = parts[2] or '/'
        else:
            # host or host:port
            scheme = None
            authority = uri
            path = '/'
        host, port = splitport(authority)
        if default_port and port is None and scheme is not None:
            # Make the default port explicit so "host" and "host:80" match.
            dport = {"http": 80,
                     "https": 443,
                     }.get(scheme)
            if dport is not None:
                authority = "%s:%d" % (host, dport)
        return authority, path

    def is_suburi(self, base, test):
        """Check if test is below base in a URI tree

        Both args must be URIs in reduced form.
        """
        if base == test:
            return True
        if base[0] != test[0]:
            return False
        common = posixpath.commonprefix((base[1], test[1]))
        if len(common) == len(base[1]):
            return True
        return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
    """Password manager that falls back to the None ("default") realm
    when no credentials are registered for the requested realm."""

    def find_user_password(self, realm, authuri):
        lookup = HTTPPasswordMgr.find_user_password
        creds = lookup(self, realm, authuri)
        if creds[0] is None:
            # Nothing recorded for this realm; retry against the default.
            creds = lookup(self, None, authuri)
        return creds
class AbstractBasicAuthHandler:
    """Shared machinery for HTTP Basic auth against servers and proxies;
    concrete subclasses supply ``auth_header``."""

    # XXX this allows for multiple auth-schemes, but will stupidly pick
    # the last one with a realm specified.

    # allow for double- and single-quoted realm values
    # (single quotes are a violation of the RFC, but appear in the wild)
    rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                    'realm=(["\'])(.*?)\\2', re.I)

    # XXX could pre-emptively send auth info already accepted (RFC 2617,
    # end of section 2, and section 1.2 immediately after "credentials"
    # production).

    def __init__(self, password_mgr=None):
        if password_mgr is None:
            password_mgr = HTTPPasswordMgr()
        self.passwd = password_mgr
        # Expose the password manager's add_password directly.
        self.add_password = self.passwd.add_password

    def http_error_auth_reqed(self, authreq, host, req, headers):
        # On entry *authreq* is the challenge header NAME; it is rebound
        # below to that header's value.
        # host may be an authority (without userinfo) or a URL with an
        # authority
        # XXX could be multiple headers
        authreq = headers.get(authreq, None)
        if authreq:
            mo = AbstractBasicAuthHandler.rx.search(authreq)
            if mo:
                scheme, quote, realm = mo.groups()
                if scheme.lower() == 'basic':
                    return self.retry_http_basic_auth(host, req, realm)

    def retry_http_basic_auth(self, host, req, realm):
        user, pw = self.passwd.find_user_password(realm, host)
        if pw is not None:
            raw = "%s:%s" % (user, pw)
            auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii")
            # If we already sent exactly this credential, give up rather
            # than loop.
            if req.headers.get(self.auth_header, None) == auth:
                return None
            req.add_header(self.auth_header, auth)
            return self.parent.open(req)
        else:
            return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Basic authentication against origin servers (HTTP 401)."""

    auth_header = 'Authorization'

    def http_error_401(self, req, fp, code, msg, headers):
        url = req.get_full_url()
        return self.http_error_auth_reqed('www-authenticate',
                                          url, req, headers)
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Basic authentication against proxies (HTTP 407)."""

    auth_header = 'Proxy-authorization'

    def http_error_407(self, req, fp, code, msg, headers):
        # http_error_auth_reqed requires that there is no userinfo component in
        # authority.  Assume there isn't one, since urllib2 does not (and
        # should not, RFC 3986 s. 3.2.1) support requests for URLs containing
        # userinfo.
        authority = req.get_host()
        return self.http_error_auth_reqed('proxy-authenticate',
                                          authority, req, headers)
def randombytes(n):
    """Return *n* random bytes sourced from the OS entropy pool."""
    return os.urandom(n)
class AbstractDigestAuthHandler:
    """Shared machinery for HTTP Digest auth (RFC 2617); concrete
    subclasses supply ``auth_header``."""

    # Digest authentication is specified in RFC 2617.

    # XXX The client does not inspect the Authentication-Info header
    # in a successful response.

    # XXX It should be possible to test this implementation against
    # a mock server that just generates a static set of challenges.

    # XXX qop="auth-int" supports is shaky

    def __init__(self, passwd=None):
        # *passwd* is an HTTPPasswordMgr-compatible store; created on
        # demand when omitted.
        if passwd is None:
            passwd = HTTPPasswordMgr()
        self.passwd = passwd
        self.add_password = self.passwd.add_password
        self.retried = 0
        self.nonce_count = 0

    def reset_retry_count(self):
        self.retried = 0

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        authreq = headers.get(auth_header, None)
        if self.retried > 5:
            # Don't fail endlessly - if we failed once, we'll probably
            # fail a second time. Hm. Unless the Password Manager is
            # prompting for the information. Crap. This isn't great
            # but it's better than the current 'repeat until recursion
            # depth exceeded' approach <wink>
            raise HTTPError(req.get_full_url(), 401, "digest auth failed",
                            headers, None)
        else:
            self.retried += 1
        if authreq:
            scheme = authreq.split()[0]
            if scheme.lower() == 'digest':
                return self.retry_http_digest_auth(req, authreq)

    def retry_http_digest_auth(self, req, auth):
        token, challenge = auth.split(' ', 1)
        chal = parse_keqv_list(parse_http_list(challenge))
        auth = self.get_authorization(req, chal)
        if auth:
            auth_val = 'Digest %s' % auth
            # If we already sent exactly this credential, give up rather
            # than loop.
            if req.headers.get(self.auth_header, None) == auth_val:
                return None
            req.add_unredirected_header(self.auth_header, auth_val)
            resp = self.parent.open(req)
            return resp

    def get_cnonce(self, nonce):
        # The cnonce-value is an opaque
        # quoted string value provided by the client and used by both client
        # and server to avoid chosen plaintext attacks, to provide mutual
        # authentication, and to provide some message integrity protection.
        # This isn't a fabulous effort, but it's probably Good Enough.
        s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime())
        b = s.encode("ascii") + randombytes(8)
        dig = hashlib.sha1(b).hexdigest()
        return dig[:16]

    def get_authorization(self, req, chal):
        """Build the Digest Authorization header value for *req* from the
        parsed challenge *chal*, or return None if it cannot."""
        try:
            realm = chal['realm']
            nonce = chal['nonce']
            qop = chal.get('qop')
            algorithm = chal.get('algorithm', 'MD5')
            # mod_digest doesn't send an opaque, even though it isn't
            # supposed to be optional
            opaque = chal.get('opaque', None)
        except KeyError:
            return None

        H, KD = self.get_algorithm_impls(algorithm)
        if H is None:
            # Unsupported digest algorithm.
            return None

        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
        if user is None:
            return None

        # XXX not implemented yet
        if req.has_data():
            entdig = self.get_entity_digest(req.get_data(), chal)
        else:
            entdig = None

        A1 = "%s:%s:%s" % (user, realm, pw)
        A2 = "%s:%s" % (req.get_method(),
                        # XXX selector: what about proxies and full urls
                        req.get_selector())
        if qop == 'auth':
            self.nonce_count += 1
            ncvalue = '%08x' % self.nonce_count
            cnonce = self.get_cnonce(nonce)
            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
            respdig = KD(H(A1), noncebit)
        elif qop is None:
            respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
        else:
            # XXX handle auth-int.
            raise URLError("qop '%s' is not supported." % qop)

        # XXX should the partial digests be encoded too?

        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (user, realm, nonce, req.get_selector(),
                                  respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if entdig:
            base += ', digest="%s"' % entdig
        base += ', algorithm="%s"' % algorithm
        if qop:
            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
        return base

    def get_algorithm_impls(self, algorithm):
        """Return (H, KD) digest callables for *algorithm*; H is None
        when the algorithm is not supported."""
        # algorithm should be case-insensitive according to RFC2617
        algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        if algorithm == 'MD5':
            H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest()
        elif algorithm == 'SHA':
            H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest()
        else:
            # Fix: an unrecognized algorithm previously left H unbound and
            # raised UnboundLocalError at the return below.  Callers
            # (get_authorization) already test ``H is None`` to mean
            # "unsupported", so honor that contract.
            H = None
        # XXX MD5-sess
        KD = lambda s, d: H("%s:%s" % (s, d))
        return H, KD

    def get_entity_digest(self, data, chal):
        # XXX not implemented yet
        return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """An authentication protocol defined by RFC 2069

    Digest authentication improves on basic authentication because it
    does not transmit passwords in the clear.
    """

    auth_header = 'Authorization'
    handler_order = 490  # before Basic auth

    def http_error_401(self, req, fp, code, msg, headers):
        # Authenticate against the host part of the request URL.
        host = urlparse.urlparse(req.get_full_url())[1]
        retry = self.http_error_auth_reqed('www-authenticate',
                                           host, req, headers)
        self.reset_retry_count()
        return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """Digest authentication support for proxies (HTTP 407 responses)."""

    auth_header = 'Proxy-Authorization'
    handler_order = 490  # before Basic auth

    def http_error_407(self, req, fp, code, msg, headers):
        # Proxies are identified by the request's host, not the URL netloc.
        authority = req.get_host()
        response = self.http_error_auth_reqed('proxy-authenticate',
                                              authority, req, headers)
        self.reset_retry_count()
        return response
class AbstractHTTPHandler(BaseHandler):
    """Shared machinery for HTTPHandler and HTTPSHandler.

    Subclasses supply the httplib connection class via do_open() and
    alias do_request_ as their <scheme>_request preprocessor.
    """

    def __init__(self, debuglevel=0):
        # Debug level forwarded to the httplib connection in do_open().
        self._debuglevel = debuglevel

    def set_http_debuglevel(self, level):
        """Set the debug level passed to the underlying httplib connection."""
        self._debuglevel = level

    def do_request_(self, request):
        """Preprocess *request* before it is sent; returns the request.

        Adds default Content-type/Content-length headers for POST data,
        a Host header, and any headers registered on the parent
        OpenerDirector that the request does not already carry.
        Raises URLError if the request has no host.
        """
        host = request.get_host()
        if not host:
            raise URLError('no host given')

        if request.has_data():  # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                # Default encoding for HTML-form style POST bodies.
                request.add_unredirected_header(
                    'Content-type',
                    'application/x-www-form-urlencoded')
            if not request.has_header('Content-length'):
                request.add_unredirected_header(
                    'Content-length', '%d' % len(data))

        # Prefer the host embedded in the selector (full URL, e.g. when
        # going through a proxy) over the request host.
        scheme, sel = splittype(request.get_selector())
        sel_host, sel_path = splithost(sel)
        if not request.has_header('Host'):
            request.add_unredirected_header('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            # Headers set explicitly on the request win over opener-wide ones.
            if not request.has_header(name):
                request.add_unredirected_header(name, value)

        return request

    def do_open(self, http_class, req):
        """Return an addinfourl object for the request, using http_class.

        http_class must implement the HTTPConnection API from httplib.
        The addinfourl return value is a file-like object.  It also
        has methods and attributes including:
            - info(): return a mimetools.Message object for the headers
            - geturl(): return the original request URL
            - code: HTTP status code
        """
        host = req.get_host()
        if not host:
            raise URLError('no host given')

        h = http_class(host, timeout=req.timeout)  # will parse host:port
        h.set_debuglevel(self._debuglevel)

        # Merge ordinary and unredirected headers; unredirected ones win.
        headers = dict(req.headers)
        headers.update(req.unredirected_hdrs)
        # We want to make an HTTP/1.1 request, but the addinfourl
        # class isn't prepared to deal with a persistent connection.
        # It will try to read all remaining data from the socket,
        # which will block while the server waits for the next request.
        # So make sure the connection gets closed after the (only)
        # request.
        headers["Connection"] = "close"
        # Normalize header-name capitalization (e.g. "content-type"
        # -> "Content-Type") so duplicates merge consistently.
        headers = dict(
            (name.title(), val) for name, val in headers.items())
        try:
            h.request(req.get_method(), req.get_selector(), req.data, headers)
            r = h.getresponse()
        except socket.error as err:  # XXX what error?
            raise URLError(err)

        # Pick apart the HTTPResponse object to get the addinfourl
        # object initialized properly.

        # XXX Should an HTTPResponse object really be passed to
        # BufferedReader?  If so, we should change httplib to support
        # this use directly.

        # Add some fake methods to the reader to satisfy BufferedReader.
        r.readable = lambda: True
        r.writable = r.seekable = lambda: False
        r._checkReadable = lambda: True
        r._checkWritable = lambda: False
        fp = io.BufferedReader(r)

        resp = addinfourl(fp, r.msg, req.get_full_url())
        resp.code = r.status
        resp.msg = r.reason
        return resp
class HTTPHandler(AbstractHTTPHandler):
    """Handler for the "http" scheme; all real work is in the base class."""

    def http_open(self, req):
        # Delegate to do_open() with httplib's plain-HTTP connection class.
        return self.do_open(httplib.HTTPConnection, req)

    # Reuse the shared request preprocessor as the http_request hook.
    http_request = AbstractHTTPHandler.do_request_
# Only define an https handler when httplib was built with SSL support.
if hasattr(httplib, 'HTTPS'):
    class HTTPSHandler(AbstractHTTPHandler):
        """Handler for the "https" scheme; mirrors HTTPHandler."""

        def https_open(self, req):
            # Delegate to do_open() with httplib's SSL connection class.
            return self.do_open(httplib.HTTPSConnection, req)

        # Reuse the shared request preprocessor as the https_request hook.
        https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
    """Handler that carries cookies across requests via a CookieJar."""

    def __init__(self, cookiejar=None):
        # Imported lazily so importing this module does not pull in cookielib.
        import cookielib
        self.cookiejar = cookielib.CookieJar() if cookiejar is None else cookiejar

    def http_request(self, request):
        # Attach any cookies in the jar that match this request.
        self.cookiejar.add_cookie_header(request)
        return request

    def http_response(self, request, response):
        # Record any cookies the server set on this response.
        self.cookiejar.extract_cookies(response, request)
        return response

    https_request = http_request
    https_response = http_response
class UnknownHandler(BaseHandler):
    """Last-resort handler: any scheme no other handler claims is an error."""

    def unknown_open(self, req):
        scheme = req.get_type()
        raise URLError('unknown url type: %s' % scheme)
def parse_keqv_list(l):
    """Parse list of key=value strings where keys are not duplicated.

    One pair of surrounding double quotes is stripped from each value.
    Returns a dict mapping keys to unquoted values.
    """
    parsed = {}
    for elt in l:
        k, v = elt.split('=', 1)
        # Use slicing instead of indexing so an empty value (e.g. 'key=')
        # no longer raises IndexError on v[0]; behavior for non-empty
        # values is unchanged.
        if v[:1] == '"' and v[-1:] == '"':
            v = v[1:-1]
        parsed[k] = v
    return parsed
def parse_http_list(s):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Neither commas nor quotes count if they are escaped.
    Only double-quotes count, not single-quotes.
    """
    chunks = []        # finished (unstripped) list elements
    buf = []           # characters of the element being built
    esc = in_quote = False
    for ch in s:
        if esc:
            # Previous char was a backslash inside quotes: take this
            # char literally (the backslash itself is dropped).
            buf.append(ch)
            esc = False
        elif in_quote:
            if ch == '\\':
                esc = True
            else:
                if ch == '"':
                    in_quote = False
                buf.append(ch)
        elif ch == ',':
            # Unquoted comma ends the current element.
            chunks.append(''.join(buf))
            buf = []
        else:
            if ch == '"':
                in_quote = True
            buf.append(ch)

    # append last part
    if buf:
        chunks.append(''.join(buf))

    return [chunk.strip() for chunk in chunks]
class FileHandler(BaseHandler):
    """Handler for file: URLs -- serves local files, punts remote ones to FTP."""

    # Use local file or FTP depending on form of URL
    def file_open(self, req):
        url = req.get_selector()
        # 'file://host/...' with a non-empty, non-local-looking authority:
        # retry the request through the FTP machinery.
        if url[:2] == '//' and url[2:3] != '/':
            req.type = 'ftp'
            return self.parent.open(req)
        else:
            return self.open_local_file(req)

    # names for the localhost
    names = None
    def get_names(self):
        # Lazily computed, cached on the class so it is shared by all
        # instances.  Contains the IPs this machine answers to.
        if FileHandler.names is None:
            try:
                FileHandler.names = (socket.gethostbyname('localhost'),
                                    socket.gethostbyname(socket.gethostname()))
            except socket.gaierror:
                # Hostname does not resolve; fall back to localhost only.
                FileHandler.names = (socket.gethostbyname('localhost'),)
        return FileHandler.names

    # not entirely sure what the rules are here
    def open_local_file(self, req):
        import email.utils
        import mimetypes
        host = req.get_host()
        file = req.get_selector()
        localfile = url2pathname(file)
        try:
            stats = os.stat(localfile)
            size = stats.st_size
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            mtype = mimetypes.guess_type(file)[0]
            # Synthesize HTTP-like headers from the file's metadata.
            headers = mimetools.Message(StringIO(
                'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
                (mtype or 'text/plain', size, modified)))
            if host:
                host, port = splitport(host)
            # Serve the file only when no host was given, or the host
            # (with no explicit port) resolves to this machine.
            if not host or \
                (not port and _safe_gethostbyname(host) in self.get_names()):
                return addinfourl(open(localfile, 'rb'),
                                  headers, 'file:'+file)
        except OSError as msg:
            # urllib2 users shouldn't expect OSErrors coming from urlopen()
            raise URLError(msg)
        raise URLError('file not on local host')
1239 def _safe_gethostbyname(host):
1240 try:
1241 return socket.gethostbyname(host)
1242 except socket.gaierror:
1243 return None
class FTPHandler(BaseHandler):
    """Handler for ftp: URLs."""

    def ftp_open(self, req):
        # Imported lazily so importing this module does not pull these in.
        import ftplib
        import mimetypes
        host = req.get_host()
        if not host:
            raise URLError('ftp error: no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        else:
            port = int(port)

        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        # Components may be %-quoted in the URL; decode them here.
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')

        try:
            host = socket.gethostbyname(host)
        except socket.error as msg:
            raise URLError(msg)
        # Split ';type=a'-style attributes off the path, then the path
        # into directory components plus the final filename.
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = list(map(unquote, dirs))
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            # Absolute path: drop the empty leading component.
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
            # Default transfer type: binary for files, directory listing
            # when the path names a directory (empty filename).
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitvalue(attr)
                # An explicit ';type=' attribute overrides the default.
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()
            fp, retrlen = fw.retrfile(file, type)
            # Synthesize HTTP-like headers for the addinfourl wrapper.
            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors as msg:
            # Re-raise as URLError but keep the original traceback.
            raise URLError('ftp error: %s' % msg).with_traceback(sys.exc_info()[2])

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        # Hook point: CacheFTPHandler overrides this to reuse connections.
        fw = ftpwrapper(user, passwd, host, port, dirs, timeout)
        return fw
class CacheFTPHandler(FTPHandler):
    """FTPHandler that caches live FTP connections for reuse.

    Connections are keyed by (user, host, port, path, timeout), expire
    after `delay` seconds, and are capped at `max_conns` entries.
    """
    # XXX would be nice to have pluggable cache strategies
    # XXX this stuff is definitely not thread safe

    def __init__(self):
        self.cache = {}      # key -> ftpwrapper
        self.timeout = {}    # key -> absolute expiry time
        self.soonest = 0     # earliest expiry time among cached entries
        self.delay = 60      # seconds a cached connection stays alive
        self.max_conns = 16  # cap on simultaneously cached connections

    def setTimeout(self, t):
        """Set the keep-alive delay (seconds) for cached connections."""
        self.delay = t

    def setMaxConns(self, m):
        """Set the maximum number of cached connections."""
        self.max_conns = m

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Return a cached ftpwrapper for this endpoint, creating it if needed."""
        key = user, host, port, '/'.join(dirs), timeout
        if key in self.cache:
            # Cache hit: just refresh the expiry time.
            self.timeout[key] = time.time() + self.delay
        else:
            self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
            self.timeout[key] = time.time() + self.delay
        self.check_cache()
        return self.cache[key]

    def _reset_soonest(self):
        # Recompute the earliest pending expiry.  Bug fix: the original
        # called min() directly, which raises ValueError once the cache
        # is empty (all entries evicted, or max_conns == 1).
        if self.timeout:
            self.soonest = min(self.timeout.values())
        else:
            self.soonest = 0

    def check_cache(self):
        """Evict expired connections, then enforce the size limit."""
        # first check for old ones
        t = time.time()
        if self.soonest <= t:
            for k, v in list(self.timeout.items()):
                if v < t:
                    self.cache[k].close()
                    del self.cache[k]
                    del self.timeout[k]
            self._reset_soonest()

        # then check the size
        if len(self.cache) == self.max_conns:
            # Drop one entry -- the one expiring soonest.
            for k, v in list(self.timeout.items()):
                if v == self.soonest:
                    del self.cache[k]
                    del self.timeout[k]
                    break
            self._reset_soonest()