import sys
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
-from babel.util import parse_encoding, pathmatch
+from babel.util import parse_encoding, parse_future_flags, pathmatch
from babel._compat import PY2, text_type
from textwrap import dedent
comment_tag = None
encoding = parse_encoding(fileobj) or options.get('encoding', 'UTF-8')
+ future_flags = parse_future_flags(fileobj, encoding)
if PY2:
    next_line = fileobj.readline
# encoding
# https://sourceforge.net/tracker/?func=detail&atid=355470&
# aid=617979&group_id=5470
- value = eval('# coding=%s\n%s' % (str(encoding), value),
-              {'__builtins__': {}}, {})
+ code = compile('# coding=%s\n%s' % (str(encoding), value),
+                '<string>', 'eval', future_flags)
+ value = eval(code, {'__builtins__': {}}, {})
if PY2 and not isinstance(value, text_type):
    value = value.decode(encoding)
buf.append(value)
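
For context on the change above: a bare eval() compiles the message literal without any __future__ features, so on Python 2 a module that uses `from __future__ import unicode_literals` would still have its messages extracted as bytestrings. Compiling the literal first with the parsed flags reproduces the behaviour it has in its own module. A minimal standalone sketch of the mechanism (illustrative only, not Babel code):

    import __future__

    literal = r"'\xa0'"
    flags = __future__.unicode_literals.compiler_flag

    # Without the flag, Python 2 evaluates the literal to a bytestring;
    # with it, both Python 2 and 3 yield the unicode string u'\xa0'.
    code = compile(literal, '<string>', 'eval', flags)
    value = eval(code, {'__builtins__': {}}, {})
    print(repr(value))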
fp.seek(pos)
+PYTHON_FUTURE_IMPORT_re = re.compile(
+    r'from\s+__future__\s+import\s+\(*(.+)\)*')
+
+
+def parse_future_flags(fp, encoding='latin-1'):
+    """Parse the compiler flags set by :mod:`__future__` imports in the
+    given Python code.
+    """
+    import __future__
+    pos = fp.tell()
+    fp.seek(0)
+    flags = 0
+    try:
+        body = fp.read().decode(encoding)
+        for m in PYTHON_FUTURE_IMPORT_re.finditer(body):
+            # Strip parentheses left over from a parenthesized import list
+            # and skip anything that is not an actual __future__ feature.
+            names = [x.strip().strip('()') for x in m.group(1).split(',')]
+            for name in names:
+                feature = getattr(__future__, name, None)
+                if feature:
+                    flags |= feature.compiler_flag
+    finally:
+        fp.seek(pos)
+    return flags
+
+
def pathmatch(pattern, filename):
"""Extended pathname pattern matching.
            return [(1, None, (), ())]
        for x in extract.extract(arbitrary_extractor, BytesIO(b"")):
            assert x[0] == 1
+
+    def test_future(self):
+        buf = BytesIO(br"""
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+nbsp = _('\xa0')
+""")
+        messages = list(extract.extract('python', buf,
+                                        extract.DEFAULT_KEYWORDS, [], {}))
+        assert messages[0][1] == u'\xa0'