encoding = options.get('encoding', 'utf-8')
last_token = None
call_stack = -1
+ dotted = any('.' in kw for kw in keywords)
- for token in tokenize(fileobj.read().decode(encoding), jsx=options.get("jsx", True)):
+ for token in tokenize(
+ fileobj.read().decode(encoding),
+ jsx=options.get("jsx", True),
+ dotted=dotted
+ ):
if token.type == 'operator' and token.value == '(':
if funcname:
message_lineno = token.lineno
escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
+# Bare JS identifier: may start with a letter, digit-class word char, `$`, or `_`.
+name_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)
+# Dotted access chain (e.g. `a.b.c`). Requires at least two characters, so a
+# single-character name falls through to `name_re` instead.
+# NOTE(review): the trailing character class includes `.`, so a name ending in
+# a dot (`foo.`) also matches — confirm the tokenizer handles that case.
+dotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)
division_re = re.compile(r'/=?')
regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*(?s)')
line_re = re.compile(r'(\r\n|\n|\r)')
line_join_re = re.compile(r'\\' + line_re.pattern)
uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
-name_re = re.compile(r'(\$+\w*|[^\W\d]\w*)(?u)')
Token = namedtuple('Token', 'type value lineno')
(None, re.compile(r'<!--.*')),
('linecomment', re.compile(r'//.*')),
('multilinecomment', re.compile(r'/\*.*?\*/(?us)')),
+ ('dotted_name', dotted_name_re),
('name', name_re),
('number', re.compile(r'''(?x)(
(?:0|[1-9]\d*)
]
-def get_rules(jsx):
+def get_rules(jsx, dotted):
"""
Get a tokenization rule list given the passed syntax options.
for token_type, rule in _rules:
if not jsx and token_type and 'jsx' in token_type:
continue
+ if token_type == 'dotted_name':
+ if not dotted:
+ continue
+ token_type = 'name'
rules.append((token_type, rule))
return rules
return u''.join(result)
-def tokenize(source, jsx=True):
+def tokenize(source, jsx=True, dotted=True):
"""
Tokenize JavaScript/JSX source. Returns a generator of tokens.
:param jsx: Enable (limited) JSX parsing.
+ :param dotted: Read dotted names as single name token.
"""
may_divide = False
pos = 0
lineno = 1
end = len(source)
- rules = get_rules(jsx=jsx)
+ rules = get_rules(jsx=jsx, dotted=dotted)
while pos < end:
# handle regular rules first
assert messages == EXPECTED_JSX_MESSAGES
else:
assert messages != EXPECTED_JSX_MESSAGES
+
+
+def test_dotted_keyword_extract():
+    """A fully dotted keyword (``com.corporate.i18n.formatMessage``) matches
+    and its message is extracted — exercises the ``dotted`` lexer path."""
+    buf = BytesIO(b"msg1 = com.corporate.i18n.formatMessage('Insert coin to continue')")
+    messages = list(
+        extract.extract('javascript', buf, {"com.corporate.i18n.formatMessage": None}, [], {})
+    )
+
+    # Expected tuple shape: (lineno, message, comments, context).
+    assert messages == [(1, 'Insert coin to continue', [], None)]
def test_unquote():
assert jslexer.unquote_string('""') == ''
assert jslexer.unquote_string(r'"h\u00ebllo"') == u"hëllo"
+
+
+def test_dollar_in_identifier():
+    """``$`` is a valid identifier character anywhere in a JS name, so the
+    whole string lexes as a single ``name`` token."""
+    assert list(jslexer.tokenize('dollar$dollar')) == [('name', 'dollar$dollar', 1)]
+
+
+def test_dotted_name():
+    """With ``dotted=True``, ``foo.bar`` is emitted as one ``name`` token
+    rather than name / operator / name."""
+    assert list(jslexer.tokenize("foo.bar(quux)", dotted=True)) == [
+        ('name', 'foo.bar', 1),
+        ('operator', '(', 1),
+        ('name', 'quux', 1),
+        ('operator', ')', 1)
+    ]
+
+
+def test_dotted_name_end():
+    """A dotted name at end-of-input (no trailing token) still lexes as a
+    single ``name`` token."""
+    assert list(jslexer.tokenize("foo.bar", dotted=True)) == [
+        ('name', 'foo.bar', 1),
+    ]