util.py 7.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262
  1. # -*- coding: utf-8 -*-
  2. """
  3. babel.util
  4. ~~~~~~~~~~
  5. Various utility classes and functions.
  6. :copyright: (c) 2013-2021 by the Babel Team.
  7. :license: BSD, see LICENSE for more details.
  8. """
  9. import codecs
  10. import collections
  11. from datetime import timedelta, tzinfo
  12. import os
  13. import re
  14. import textwrap
  15. from babel._compat import izip, imap
  16. import pytz as _pytz
  17. from babel import localtime
# Unique sentinel object, compared by identity, to signal "no value supplied".
missing = object()
  19. def distinct(iterable):
  20. """Yield all items in an iterable collection that are distinct.
  21. Unlike when using sets for a similar effect, the original ordering of the
  22. items in the collection is preserved by this function.
  23. >>> print(list(distinct([1, 2, 1, 3, 4, 4])))
  24. [1, 2, 3, 4]
  25. >>> print(list(distinct('foobar')))
  26. ['f', 'o', 'b', 'a', 'r']
  27. :param iterable: the iterable collection providing the data
  28. """
  29. seen = set()
  30. for item in iter(iterable):
  31. if item not in seen:
  32. yield item
  33. seen.add(item)
# Regexp to match a Python magic encoding comment (PEP 263).  Compiled with
# re.VERBOSE, so the literal spaces inside the pattern are ignored; group 1
# captures the declared encoding name.
PYTHON_MAGIC_COMMENT_re = re.compile(
    br'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)
def parse_encoding(fp):
    """Deduce the encoding of a source file from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: https://docs.python.org/3.4/reference/lexical_analysis.html#encoding-declarations

    The ``fp`` argument should be a seekable file object.

    (From Jeff Dairiki)

    :param fp: an open, seekable binary file object
    :return: the encoding name as a string, or ``None`` if no magic comment
             (and no BOM) was found
    :raises SyntaxError: if a UTF-8 BOM is combined with a magic comment
                         declaring a different encoding
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            # Strip the UTF-8 BOM before matching for a magic comment.
            line1 = line1[len(codecs.BOM_UTF8):]
        m = PYTHON_MAGIC_COMMENT_re.match(line1)
        if not m:
            # The magic comment may also appear on the second line, but only
            # if the first line parses as standalone Python (i.e. line 2 is
            # not a continuation of line 1).
            try:
                import ast
                ast.parse(line1.decode('latin-1'))
            except (ImportError, SyntaxError, UnicodeEncodeError):
                # Either it's a real syntax error, in which case the source is
                # not valid python source, or line2 is a continuation of line1,
                # in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = PYTHON_MAGIC_COMMENT_re.match(line2)
        if has_bom:
            if m:
                # A BOM plus a conflicting magic comment is an error; with a
                # BOM the encoding is always UTF-8.
                magic_comment_encoding = m.group(1).decode('latin-1')
                if magic_comment_encoding != 'utf-8':
                    raise SyntaxError(
                        'encoding problem: {0} with BOM'.format(
                            magic_comment_encoding))
            return 'utf-8'
        elif m:
            return m.group(1).decode('latin-1')
        else:
            return None
    finally:
        # Always restore the caller's original file position.
        fp.seek(pos)
  79. PYTHON_FUTURE_IMPORT_re = re.compile(
  80. r'from\s+__future__\s+import\s+\(*(.+)\)*')
  81. def parse_future_flags(fp, encoding='latin-1'):
  82. """Parse the compiler flags by :mod:`__future__` from the given Python
  83. code.
  84. """
  85. import __future__
  86. pos = fp.tell()
  87. fp.seek(0)
  88. flags = 0
  89. try:
  90. body = fp.read().decode(encoding)
  91. # Fix up the source to be (hopefully) parsable by regexpen.
  92. # This will likely do untoward things if the source code itself is broken.
  93. # (1) Fix `import (\n...` to be `import (...`.
  94. body = re.sub(r'import\s*\([\r\n]+', 'import (', body)
  95. # (2) Join line-ending commas with the next line.
  96. body = re.sub(r',\s*[\r\n]+', ', ', body)
  97. # (3) Remove backslash line continuations.
  98. body = re.sub(r'\\\s*[\r\n]+', ' ', body)
  99. for m in PYTHON_FUTURE_IMPORT_re.finditer(body):
  100. names = [x.strip().strip('()') for x in m.group(1).split(',')]
  101. for name in names:
  102. feature = getattr(__future__, name, None)
  103. if feature:
  104. flags |= feature.compiler_flag
  105. finally:
  106. fp.seek(pos)
  107. return flags
  108. def pathmatch(pattern, filename):
  109. """Extended pathname pattern matching.
  110. This function is similar to what is provided by the ``fnmatch`` module in
  111. the Python standard library, but:
  112. * can match complete (relative or absolute) path names, and not just file
  113. names, and
  114. * also supports a convenience pattern ("**") to match files at any
  115. directory level.
  116. Examples:
  117. >>> pathmatch('**.py', 'bar.py')
  118. True
  119. >>> pathmatch('**.py', 'foo/bar/baz.py')
  120. True
  121. >>> pathmatch('**.py', 'templates/index.html')
  122. False
  123. >>> pathmatch('./foo/**.py', 'foo/bar/baz.py')
  124. True
  125. >>> pathmatch('./foo/**.py', 'bar/baz.py')
  126. False
  127. >>> pathmatch('^foo/**.py', 'foo/bar/baz.py')
  128. True
  129. >>> pathmatch('^foo/**.py', 'bar/baz.py')
  130. False
  131. >>> pathmatch('**/templates/*.html', 'templates/index.html')
  132. True
  133. >>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
  134. False
  135. :param pattern: the glob pattern
  136. :param filename: the path name of the file to match against
  137. """
  138. symbols = {
  139. '?': '[^/]',
  140. '?/': '[^/]/',
  141. '*': '[^/]+',
  142. '*/': '[^/]+/',
  143. '**/': '(?:.+/)*?',
  144. '**': '(?:.+/)*?[^/]+',
  145. }
  146. if pattern.startswith('^'):
  147. buf = ['^']
  148. pattern = pattern[1:]
  149. elif pattern.startswith('./'):
  150. buf = ['^']
  151. pattern = pattern[2:]
  152. else:
  153. buf = []
  154. for idx, part in enumerate(re.split('([?*]+/?)', pattern)):
  155. if idx % 2:
  156. buf.append(symbols[part])
  157. elif part:
  158. buf.append(re.escape(part))
  159. match = re.match(''.join(buf) + '$', filename.replace(os.sep, '/'))
  160. return match is not None
  161. class TextWrapper(textwrap.TextWrapper):
  162. wordsep_re = re.compile(
  163. r'(\s+|' # any whitespace
  164. r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' # em-dash
  165. )
  166. def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
  167. """Simple wrapper around the ``textwrap.wrap`` function in the standard
  168. library. This version does not wrap lines on hyphens in words.
  169. :param text: the text to wrap
  170. :param width: the maximum line width
  171. :param initial_indent: string that will be prepended to the first line of
  172. wrapped output
  173. :param subsequent_indent: string that will be prepended to all lines save
  174. the first of wrapped output
  175. """
  176. wrapper = TextWrapper(width=width, initial_indent=initial_indent,
  177. subsequent_indent=subsequent_indent,
  178. break_long_words=False)
  179. return wrapper.wrap(text)
# Backwards-compatibility alias: callers should use
# ``collections.OrderedDict`` directly.
# TODO (Babel 3.x): Remove this re-export
odict = collections.OrderedDict
  182. class FixedOffsetTimezone(tzinfo):
  183. """Fixed offset in minutes east from UTC."""
  184. def __init__(self, offset, name=None):
  185. self._offset = timedelta(minutes=offset)
  186. if name is None:
  187. name = 'Etc/GMT%+d' % offset
  188. self.zone = name
  189. def __str__(self):
  190. return self.zone
  191. def __repr__(self):
  192. return '<FixedOffset "%s" %s>' % (self.zone, self._offset)
  193. def utcoffset(self, dt):
  194. return self._offset
  195. def tzname(self, dt):
  196. return self.zone
  197. def dst(self, dt):
  198. return ZERO
# Export the localtime functionality here because that's
# where it was in the past.
# NOTE: these are plain aliases of names defined in babel.localtime (and
# pytz for UTC), kept for backwards compatibility.
UTC = _pytz.utc
LOCALTZ = localtime.LOCALTZ
get_localzone = localtime.get_localzone
STDOFFSET = localtime.STDOFFSET
DSTOFFSET = localtime.DSTOFFSET
DSTDIFF = localtime.DSTDIFF
# ZERO is also used by FixedOffsetTimezone.dst above.
ZERO = localtime.ZERO