'+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
'+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
'>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
- '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.'
+ '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
]
operators.sort(lambda a, b: cmp(-len(a), -len(b)))
uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
-class TokenError(ValueError):
- """Raised if the tokenizer stumbled upon invalid tokens."""
-
-
class Token(tuple):
"""Represents a token as returned by `tokenize`."""
__slots__ = ()
match = regex_re.match(source, pos)
token_type = 'regexp'
if match is None:
- raise TokenError('invalid syntax around line %d' % lineno)
+ # whoops, invalid syntax. jump one char ahead and try again.
+ pos += 1
+ continue
token_value = match.group()
if token_type is not None:
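Net effect of the second hunk: a character that matches no token rule no longer aborts the whole run with TokenError; the lexer skips one character and resumes scanning. A minimal sketch of the resulting behavior, assuming the module exposes the `tokenize(source)` generator referenced in the `Token` docstring (the import path below is hypothetical, and the exact signature may differ):

    # hypothetical import path; the real module name is not shown in this hunk
    from jslexer import tokenize

    source = "var x = 1; @ var y = 2;"  # '@' matches no token rule here

    # Previously this raised TokenError at the stray '@'; with the change the
    # character is skipped and the remaining tokens are still yielded.
    for tok in tokenize(source):
        print(tok)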