test_info = (# text, (block, indent))
("", (None, None)),
("[1,", (None, None)), # TokenError
- ("if 1:\n", ('if 1:\n', None)),
- ("if 1:\n 2\n 3\n", ('if 1:\n', ' 2\n')),
+ ("if 1:\n", ('if 1:', None)),
+ ("if 1:\n 2\n 3\n", ('if 1:', ' 2')),
)
for code, expected_pair in test_info:
with self.subTest(code=code):
"""
with TemporaryPyFile(SOURCE_CODES["nannynag_errored"]) as file_path:
out = f"{file_path!r}: *** Line 3: trouble in tab city! ***\n"
- out += "offending line: '\\tprint(\"world\")\\n'\n"
+ out += "offending line: '\\tprint(\"world\")'\n"
out += "inconsistent use of tabs and spaces in indentation\n"
tabnanny.verbose = 1
def test_when_nannynag_error(self):
"""A python source code file eligible for raising `tabnanny.NannyNag`."""
with TemporaryPyFile(SOURCE_CODES["nannynag_errored"]) as file_path:
- out = f"{file_path} 3 '\\tprint(\"world\")\\n'\n"
+ out = f"{file_path} 3 '\\tprint(\"world\")'\n"
self.verify_tabnanny_check(file_path, out=out)
def test_when_no_file(self):
"""Should display more error information if verbose mode is on."""
with TemporaryPyFile(SOURCE_CODES["nannynag_errored"]) as path:
stdout = textwrap.dedent(
- "offending line: '\\tprint(\"world\")\\n'"
+ "offending line: '\\tprint(\"world\")'"
).strip()
self.validate_cmd("-v", path, stdout=stdout, partial=True)
"""Should display detailed error information if double verbose is on."""
with TemporaryPyFile(SOURCE_CODES["nannynag_errored"]) as path:
stdout = textwrap.dedent(
- "offending line: '\\tprint(\"world\")\\n'"
+ "offending line: '\\tprint(\"world\")'"
).strip()
self.validate_cmd("-vv", path, stdout=stdout, partial=True)
e.exception.msg,
'unindent does not match any outer indentation level')
self.assertEqual(e.exception.offset, 9)
- self.assertEqual(e.exception.text, ' x += 5\n')
+ self.assertEqual(e.exception.text, ' x += 5')
def test_int(self):
# Ordinary integers and binary operators
# skip the initial encoding token and the end tokens
tokens = list(_tokenize(readline(), encoding='utf-8'))[:-2]
- expected_tokens = [TokenInfo(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"\n')]
+ expected_tokens = [TokenInfo(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"bytes not decoded with encoding")
--- /dev/null
+Tokens emitted by the :mod:`tokenize` module do not include an implicit
+``\n`` character in the ``line`` attribute anymore. Patch by Pablo Galindo.
int result = 0;
Py_ssize_t size = tok->inp - tok->buf;
+ assert(tok->buf[size-1] == '\n');
+ size -= 1; // Remove the newline character from the end of the line
error_line = PyUnicode_DecodeUTF8(tok->buf, size, "replace");
if (!error_line) {
result = -1;
}
Py_ssize_t size = it->tok->inp - it->tok->buf;
+ assert(it->tok->buf[size-1] == '\n');
+ size -= 1; // Remove the newline character from the end of the line
PyObject *line = PyUnicode_DecodeUTF8(it->tok->buf, size, "replace");
if (line == NULL) {
Py_DECREF(str);