    NAME       'x'           (1, 3) (1, 4)
    """)
+    def test_multiline_non_ascii_fstring(self):
+        self.check_tokenize("""\
+a = f'''
+    Autorzy, którzy tą jednostkę mają wpisani jako AKTUALNA -- czyli'''""", """\
+    NAME       'a'           (1, 0) (1, 1)
+    OP         '='           (1, 2) (1, 3)
+    FSTRING_START "f\'\'\'"        (1, 4) (1, 8)
+    FSTRING_MIDDLE '\\n    Autorzy, którzy tą jednostkę mają wpisani jako AKTUALNA -- czyli' (1, 8) (2, 68)
+    FSTRING_END "\'\'\'"         (2, 68) (2, 71)
+    """)
+
class GenerateTokensTest(TokenizeTest):
    def check_tokenize(self, s, expected):
        # Format the tokens in s in a table format.
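
To see what the new test pins down, here is a minimal repro sketch using the public tokenize API. This is not part of the diff, and it assumes Python 3.12+, where the FSTRING_START/FSTRING_MIDDLE/FSTRING_END tokens exist:

    import io
    import tokenize

    # Same input as the new test: line 2 contains non-ASCII characters,
    # so its byte length and character length differ.
    source = (
        "a = f'''\n"
        "    Autorzy, którzy tą jednostkę mają wpisani jako AKTUALNA -- czyli'''"
    )

    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tokenize.tok_name[tok.type], tok.start, tok.end)

    # With this fix, FSTRING_END is reported at (2, 68)-(2, 71), i.e. in
    # character columns. Without it, the cached byte/character difference
    # was discarded when the line cache refreshed, and FSTRING_END came
    # back at byte columns (2, 72)-(2, 75).
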
    /* Needed to cache line for performance */
    PyObject *last_line;
    Py_ssize_t last_lineno;
+    Py_ssize_t last_end_lineno;
    Py_ssize_t byte_col_offset_diff;
} tokenizeriterobject;
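
For context: `byte_col_offset_diff` caches, for the current line, how far the C tokenizer's byte columns have drifted from the character columns that the tokenize module must report, and the new `last_end_lineno` records the line on which the previous token ended. A rough model of the cached quantity (plain Python, illustrative only):

    # Line 2 of the test input: ó, ą, ę, ą each take two bytes in UTF-8.
    line = "    Autorzy, którzy tą jednostkę mają wpisani jako AKTUALNA -- czyli'''"

    byte_len = len(line.encode("utf-8"))        # 75 bytes
    char_len = len(line)                        # 71 characters
    byte_col_offset_diff = byte_len - char_len  # 4
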
    self->last_line = NULL;
    self->byte_col_offset_diff = 0;
    self->last_lineno = 0;
+    self->last_end_lineno = 0;
    return (PyObject *)self;
}
            Py_XDECREF(it->last_line);
            line = PyUnicode_DecodeUTF8(line_start, size, "replace");
            it->last_line = line;
-            it->byte_col_offset_diff = 0;
+            // Only reset the byte/character column diff when this is truly a
+            // fresh line: if the previous token ended on this line (i.e. it
+            // was a multiline token), the diff computed for it must be kept.
+            if (it->tok->lineno != it->last_end_lineno) {
+                it->byte_col_offset_diff = 0;
+            }
        } else {
            // Line hasn't changed so we reuse the cached one.
            line = it->last_line;
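
This conditional is the core of the fix. Previously the diff was cleared whenever the line cache refreshed, which happens as soon as a token starts on a new line; but a multiline token (FSTRING_MIDDLE here) has already computed the diff for its end line, and the next token starts on that same line. A small Python model of the decision, with an illustrative function name:

    def should_reset_diff(tok_lineno, last_lineno, last_end_lineno):
        # The line cache refreshes when the current token starts on a
        # different line than the previous token did...
        line_changed = tok_lineno != last_lineno
        # ...but the byte/char diff must survive when the previous token
        # *ended* on this line: it already accounts for the non-ASCII
        # bytes that precede the current column.
        return line_changed and tok_lineno != last_end_lineno

    # FSTRING_END follows an FSTRING_MIDDLE that started on line 1 and
    # ended on line 2: refresh the cached line, but keep the diff.
    assert should_reset_diff(tok_lineno=2, last_lineno=1, last_end_lineno=2) is False
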
    Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
    Py_ssize_t end_lineno = it->tok->lineno;
    it->last_lineno = lineno;
+    it->last_end_lineno = end_lineno;

    Py_ssize_t col_offset = -1;
    Py_ssize_t end_col_offset = -1;
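
Downstream of this bookkeeping the iterator turns byte offsets into the character offsets it yields: a single-line token has both ends adjusted by the cached diff, while a multiline token recomputes its end column by decoding the end line and then updates the diff for whatever follows on that line. A sketch of the single-line case in Python, with hypothetical names:

    def char_columns(byte_start, byte_end, byte_col_offset_diff):
        # The cached diff converts both byte columns to character
        # columns in O(1), without decoding the line again.
        return (byte_start - byte_col_offset_diff,
                byte_end - byte_col_offset_diff)

    # FSTRING_END on line 2 of the test input sits at byte columns 72..75;
    # with the preserved diff of 4 this yields the expected (68, 71).
    print(char_columns(72, 75, 4))  # (68, 71)
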