  1. """Parse (absolute and relative) URLs.
  2. urlparse module is based upon the following RFC specifications.
  3. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
  4. and L. Masinter, January 2005.
  5. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
  6. and L.Masinter, December 1999.
  7. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
  8. Berners-Lee, R. Fielding, and L. Masinter, August 1998.
  9. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998.
  10. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
  11. 1995.
  12. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
  13. McCahill, December 1994
  14. RFC 3986 is considered the current standard and any future changes to
  15. urlparse module should conform with it. The urlparse module is
  16. currently not entirely compliant with this RFC due to defacto
  17. scenarios for parsing, and for backward compatibility purposes, some
  18. parsing quirks from older RFCs are retained. The testcases in
  19. test_urlparse.py provides a good indicator of parsing behavior.
  20. """

__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]

# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp',
                 'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

MAX_CACHE_SIZE = 20
_parse_cache = {}

def clear_cache():
    """Clear the parse cache."""
    _parse_cache.clear()

class ResultMixin(object):
    """Shared methods for the parsed result objects."""

    @property
    def username(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None

    @property
    def password(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        netloc = self.netloc.split('@')[-1]
        if '[' in netloc and ']' in netloc:
            return netloc.split(']')[0][1:].lower()
        elif ':' in netloc:
            return netloc.split(':')[0].lower()
        elif netloc == '':
            return None
        else:
            return netloc.lower()

    @property
    def port(self):
        netloc = self.netloc.split('@')[-1].split(']')[-1]
        if ':' in netloc:
            port = netloc.split(':')[1]
            port = int(port, 10)
            # verify legal port
            if (0 <= port <= 65535):
                return port
        return None

from collections import namedtuple

class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunsplit(self)

class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunparse(self)
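
# Example usage (illustrative; the example.com URL is a made-up input): the
# ResultMixin properties expose the user, password, host and port parts of the
# netloc, and geturl() reassembles the original URL.
#
#     >>> p = urlsplit('http://user:pwd@www.example.com:8080/index.html')
#     >>> p.username, p.password, p.hostname, p.port
#     ('user', 'pwd', 'www.example.com', 8080)
#     >>> p.geturl()
#     'http://user:pwd@www.example.com:8080/index.html'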

def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    tuple = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = tuple
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)
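
# Example usage (illustrative): the ';params' part is split off the path only
# for schemes listed in uses_params.
#
#     >>> urlparse('http://www.example.com/path;params?query=1#frag')
#     ParseResult(scheme='http', netloc='www.example.com', path='/path', params='params', query='query=1', fragment='frag')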

def _splitparams(url):
    # Split the ';params' part off the last path segment (or off the whole
    # url if it contains no '/').
    if '/' in url:
        i = url.find(';', url.rfind('/'))
        if i < 0:
            return url, ''
    else:
        i = url.find(';')
    return url[:i], url[i+1:]

def _splitnetloc(url, start=0):
    delim = len(url)   # position of end of domain part of url, default is end
    for c in '/?#':    # look for delimiters; the order is NOT important
        wdelim = url.find(c, start)        # find first of this delim
        if wdelim >= 0:                    # if found
            delim = min(delim, wdelim)     # use earliest delim position
    return url[start:delim], url[delim:]   # return (domain, rest)

def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v
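
# Example usage (illustrative): urlsplit() keeps ';params' attached to the path
# and caches up to MAX_CACHE_SIZE parse results.
#
#     >>> urlsplit('http://www.example.com/page;p?name=ferret#nose')
#     SplitResult(scheme='http', netloc='www.example.com', path='/page;p', query='name=ferret', fragment='nose')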

def urlunparse(data):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment = data
    if params:
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))
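
# Example usage (illustrative): the inverse of urlparse(), up to redundant
# delimiters.
#
#     >>> urlunparse(('http', 'www.example.com', '/path', 'params', 'q=1', 'frag'))
#     'http://www.example.com/path;params?q=1#frag'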

def urlunsplit(data):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment = data
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
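
# Example usage (illustrative):
#
#     >>> urlunsplit(('http', 'www.example.com', '/page', 'name=ferret', 'nose'))
#     'http://www.example.com/page?name=ferret#nose'
#     >>> urlunsplit(('', '', 'relative/path', '', ''))
#     'relative/path'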

def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
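
# Example usage (illustrative): relative references are resolved against the
# base URL, including removal of '..' segments.
#
#     >>> urljoin('http://www.example.com/a/b/c.html', 'd.html')
#     'http://www.example.com/a/b/d.html'
#     >>> urljoin('http://www.example.com/a/b/c.html', '../x.html')
#     'http://www.example.com/a/x.html'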

def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''
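
# Example usage (illustrative):
#
#     >>> urldefrag('http://www.example.com/page.html#section2')
#     ('http://www.example.com/page.html', 'section2')
#     >>> urldefrag('http://www.example.com/page.html')
#     ('http://www.example.com/page.html', '')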

# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin).  If you update this function,
# update it also in urllib.  This code duplication does not exist in Python 3.
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b, 16)))
                 for a in _hexdig for b in _hexdig)

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
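
# Example usage (illustrative): unknown or truncated percent escapes are passed
# through unchanged.
#
#     >>> unquote('abc%20def%3F')
#     'abc def?'
#     >>> unquote('100%')
#     '100%'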

def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.
    """
    dict = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        if name in dict:
            dict[name].append(value)
        else:
            dict[name] = [value]
    return dict
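
# Example usage (illustrative): repeated field names are collected into lists.
#
#     >>> d = parse_qs('a=1&a=2&b=3')
#     >>> d['a'], d['b']
#     (['1', '2'], ['3'])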

def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.  If
        false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.

    Returns a list of (name, value) pairs, as G-d intended.
    """
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %r" % (name_value,)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
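
# Example usage (illustrative): '&' and ';' both separate fields, '+' decodes
# to a space, and field order is preserved.
#
#     >>> parse_qsl('a=1;a=2&b=hello+world')
#     [('a', '1'), ('a', '2'), ('b', 'hello world')]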