git.ipfire.org Git - thirdparty/Python/cpython.git/commitdiff
[3.14] gh-63161: Add more tests for source encoding (GH-139440) (GH-139442)
author    Miss Islington (bot) <31488909+miss-islington@users.noreply.github.com>
          Tue, 7 Oct 2025 20:51:14 +0000 (22:51 +0200)
committer GitHub <noreply@github.com>
          Tue, 7 Oct 2025 20:51:14 +0000 (22:51 +0200)
(cherry picked from commit b2f5ad0c6d7cfd249c41bfbcdd0a75a9f21f1e72)

Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
Lib/test/test_source_encoding.py
Lib/test/test_tokenize.py
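
For context (this note is not part of the patch), the test_tokenize changes below exercise tokenize.detect_encoding(), which inspects at most the first two lines of a byte stream for a PEP 263 coding cookie and returns the encoding name together with the raw lines it consumed. A minimal sketch of a call, with illustrative source bytes:

    import io
    import tokenize

    # Illustrative two-line header: a shebang followed by a PEP 263 coding cookie.
    src = (b'#!/usr/bin/python\n'
           b'#coding:iso8859-15\n'
           b'print("x")\n')

    # detect_encoding() takes a readline callable and returns the declared
    # encoding plus the lines it had to read to find (or rule out) a cookie.
    encoding, consumed_lines = tokenize.detect_encoding(io.BytesIO(src).readline)
    print(encoding)        # -> iso8859-15
    print(consumed_lines)  # -> [b'#!/usr/bin/python\n', b'#coding:iso8859-15\n']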

diff --git a/Lib/test/test_source_encoding.py b/Lib/test/test_source_encoding.py
index 1399f3fcd2d3938cd826c6a3c434c87f8eaf0766..5df407823821200fac43bb0a3ef13cbeacb7a31f 100644 (file)
--- a/Lib/test/test_source_encoding.py
+++ b/Lib/test/test_source_encoding.py
@@ -172,6 +172,8 @@ class MiscSourceEncodingTest(unittest.TestCase):
             os.unlink(TESTFN)
 
 
+BUFSIZ = 2**13
+
 class AbstractSourceEncodingTest:
 
     def test_default_coding(self):
@@ -184,14 +186,20 @@ class AbstractSourceEncodingTest:
         self.check_script_output(src, br"'\xc3\u20ac'")
 
     def test_second_coding_line(self):
-        src = (b'#\n'
+        src = (b'#!/usr/bin/python\n'
+               b'#coding:iso8859-15\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xc3\u20ac'")
+
+    def test_second_coding_line_empty_first_line(self):
+        src = (b'\n'
                b'#coding:iso8859-15\n'
                b'print(ascii("\xc3\xa4"))\n')
         self.check_script_output(src, br"'\xc3\u20ac'")
 
     def test_third_coding_line(self):
         # Only first two lines are tested for a magic comment.
-        src = (b'#\n'
+        src = (b'#!/usr/bin/python\n'
                b'#\n'
                b'#coding:iso8859-15\n'
                b'print(ascii("\xc3\xa4"))\n')
@@ -209,13 +217,52 @@ class AbstractSourceEncodingTest:
                b'print(ascii("\xc3\xa4"))\n')
         self.check_script_output(src, br"'\xc3\u20ac'")
 
+    def test_double_coding_utf8(self):
+        src = (b'#coding:utf-8\n'
+               b'#coding:latin1\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xe4'")
+
+    def test_long_first_coding_line(self):
+        src = (b'#' + b' '*BUFSIZ + b'coding:iso8859-15\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xc3\u20ac'")
+
+    def test_long_second_coding_line(self):
+        src = (b'#!/usr/bin/python\n'
+               b'#' + b' '*BUFSIZ + b'coding:iso8859-15\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xc3\u20ac'")
+
+    def test_long_coding_line(self):
+        src = (b'#coding:iso-8859-15' + b' '*BUFSIZ + b'\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xc3\u20ac'")
+
+    def test_long_coding_name(self):
+        src = (b'#coding:iso-8859-1-' + b'x'*BUFSIZ + b'\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xc3\xa4'")
+
+    def test_long_first_utf8_line(self):
+        src = b'#' + b'\xc3\xa4'*(BUFSIZ//2) + b'\n'
+        self.check_script_output(src, b'')
+        src = b'# ' + b'\xc3\xa4'*(BUFSIZ//2) + b'\n'
+        self.check_script_output(src, b'')
+
+    def test_long_second_utf8_line(self):
+        src = b'\n#' + b'\xc3\xa4'*(BUFSIZ//2) + b'\n'
+        self.check_script_output(src, b'')
+        src = b'\n# ' + b'\xc3\xa4'*(BUFSIZ//2) + b'\n'
+        self.check_script_output(src, b'')
+
     def test_first_non_utf8_coding_line(self):
         src = (b'#coding:iso-8859-15 \xa4\n'
                b'print(ascii("\xc3\xa4"))\n')
         self.check_script_output(src, br"'\xc3\u20ac'")
 
     def test_second_non_utf8_coding_line(self):
-        src = (b'\n'
+        src = (b'#!/usr/bin/python\n'
                b'#coding:iso-8859-15 \xa4\n'
                b'print(ascii("\xc3\xa4"))\n')
         self.check_script_output(src, br"'\xc3\u20ac'")
@@ -224,27 +271,56 @@ class AbstractSourceEncodingTest:
         src = (b'\xef\xbb\xbfprint(ascii("\xc3\xa4"))\n')
         self.check_script_output(src, br"'\xe4'")
 
+    def test_utf8_bom_utf8_comments(self):
+        src = (b'\xef\xbb\xbf#\xc3\xa4\n'
+               b'#\xc3\xa4\n'
+               b'print(ascii("\xc3\xa4"))\n')
+        self.check_script_output(src, br"'\xe4'")
+
     def test_utf8_bom_and_utf8_coding_line(self):
         src = (b'\xef\xbb\xbf#coding:utf-8\n'
                b'print(ascii("\xc3\xa4"))\n')
         self.check_script_output(src, br"'\xe4'")
 
+    def test_utf8_non_utf8_comment_line_error(self):
+        src = (b'#coding: utf8\n'
+               b'#\n'
+               b'#\xa4\n'
+               b'raise RuntimeError\n')
+        self.check_script_error(src,
+                br"'utf-8' codec can't decode byte|"
+                br"encoding problem: utf8")
+
     def test_crlf(self):
         src = (b'print(ascii("""\r\n"""))\n')
-        out = self.check_script_output(src, br"'\n'")
+        self.check_script_output(src, br"'\n'")
 
     def test_crcrlf(self):
         src = (b'print(ascii("""\r\r\n"""))\n')
-        out = self.check_script_output(src, br"'\n\n'")
+        self.check_script_output(src, br"'\n\n'")
 
     def test_crcrcrlf(self):
         src = (b'print(ascii("""\r\r\r\n"""))\n')
-        out = self.check_script_output(src, br"'\n\n\n'")
+        self.check_script_output(src, br"'\n\n\n'")
 
     def test_crcrcrlf2(self):
         src = (b'#coding:iso-8859-1\n'
                b'print(ascii("""\r\r\r\n"""))\n')
-        out = self.check_script_output(src, br"'\n\n\n'")
+        self.check_script_output(src, br"'\n\n\n'")
+
+    def test_nul_in_first_coding_line(self):
+        src = (b'#coding:iso8859-15\x00\n'
+               b'\n'
+               b'\n'
+               b'raise RuntimeError\n')
+        self.check_script_error(src, br"source code (string )?cannot contain null bytes")
+
+    def test_nul_in_second_coding_line(self):
+        src = (b'#!/usr/bin/python\n'
+               b'#coding:iso8859-15\x00\n'
+               b'\n'
+               b'raise RuntimeError\n')
+        self.check_script_error(src, br"source code (string )?cannot contain null bytes")
 
 
 class UTF8ValidatorTest(unittest.TestCase):
@@ -324,6 +400,10 @@ class BytesSourceEncodingTest(AbstractSourceEncodingTest, unittest.TestCase):
         out = stdout.getvalue().encode('latin1')
         self.assertEqual(out.rstrip(), expected)
 
+    def check_script_error(self, src, expected):
+        with self.assertRaisesRegex(SyntaxError, expected.decode()) as cm:
+            exec(src)
+
 
 class FileSourceEncodingTest(AbstractSourceEncodingTest, unittest.TestCase):
 
@@ -335,6 +415,14 @@ class FileSourceEncodingTest(AbstractSourceEncodingTest, unittest.TestCase):
             res = script_helper.assert_python_ok(fn)
         self.assertEqual(res.out.rstrip(), expected)
 
+    def check_script_error(self, src, expected):
+        with tempfile.TemporaryDirectory() as tmpd:
+            fn = os.path.join(tmpd, 'test.py')
+            with open(fn, 'wb') as fp:
+                fp.write(src)
+            res = script_helper.assert_python_failure(fn)
+        self.assertRegex(res.err.rstrip().splitlines()[-1], b'SyntaxError.*?' + expected)
+
 
 if __name__ == "__main__":
     unittest.main()
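
As a side note on what the script-level tests above verify, here is a minimal sketch (not part of the patch): compile() and exec() accept byte source and honor the PEP 263 coding cookie, so an iso8859-15 declaration makes the byte 0xa4 in a string literal decode as U+20AC.

    # Byte source with an iso8859-15 coding cookie; 0xa4 is the euro sign in Latin-9.
    src = (b'#coding:iso8859-15\n'
           b'print(ascii("\xa4"))\n')
    exec(compile(src, '<test>', 'exec'))   # prints '\u20ac'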
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 1e485dfb1e2585786aab9d6ce85e2e91161a0143..8fdd03f347b632c54713dee709c55b91ea16b6e6 100644 (file)
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1363,7 +1363,8 @@ class TestDetectEncoding(TestCase):
 
     def test_no_bom_no_encoding_cookie(self):
         lines = (
-            b'# something\n',
+            b'#!/home/\xc3\xa4/bin/python\n',
+            b'# something \xe2\x82\xac\n',
             b'print(something)\n',
             b'do_something(else)\n'
         )
@@ -1371,16 +1372,54 @@ class TestDetectEncoding(TestCase):
         self.assertEqual(encoding, 'utf-8')
         self.assertEqual(consumed_lines, list(lines[:2]))
 
+    def test_no_bom_no_encoding_cookie_first_line_error(self):
+        lines = (
+            b'#!/home/\xa4/bin/python\n\n',
+            b'print(something)\n',
+            b'do_something(else)\n'
+        )
+        with self.assertRaises(SyntaxError):
+            tokenize.detect_encoding(self.get_readline(lines))
+
+    def test_no_bom_no_encoding_cookie_second_line_error(self):
+        lines = (
+            b'#!/usr/bin/python\n',
+            b'# something \xe2\n',
+            b'print(something)\n',
+            b'do_something(else)\n'
+        )
+        with self.assertRaises(SyntaxError):
+            tokenize.detect_encoding(self.get_readline(lines))
+
     def test_bom_no_cookie(self):
         lines = (
-            b'\xef\xbb\xbf# something\n',
+            b'\xef\xbb\xbf#!/home/\xc3\xa4/bin/python\n',
             b'print(something)\n',
             b'do_something(else)\n'
         )
         encoding, consumed_lines = tokenize.detect_encoding(self.get_readline(lines))
         self.assertEqual(encoding, 'utf-8-sig')
         self.assertEqual(consumed_lines,
-                         [b'# something\n', b'print(something)\n'])
+                         [b'#!/home/\xc3\xa4/bin/python\n', b'print(something)\n'])
+
+    def test_bom_no_cookie_first_line_error(self):
+        lines = (
+            b'\xef\xbb\xbf#!/home/\xa4/bin/python\n',
+            b'print(something)\n',
+            b'do_something(else)\n'
+        )
+        with self.assertRaises(SyntaxError):
+            tokenize.detect_encoding(self.get_readline(lines))
+
+    def test_bom_no_cookie_second_line_error(self):
+        lines = (
+            b'\xef\xbb\xbf#!/usr/bin/python\n',
+            b'# something \xe2\n',
+            b'print(something)\n',
+            b'do_something(else)\n'
+        )
+        with self.assertRaises(SyntaxError):
+            tokenize.detect_encoding(self.get_readline(lines))
 
     def test_cookie_first_line_no_bom(self):
         lines = (
@@ -1456,27 +1495,58 @@ class TestDetectEncoding(TestCase):
         expected = [b"print('\xc2\xa3')\n"]
         self.assertEqual(consumed_lines, expected)
 
-    def test_cookie_second_line_commented_first_line(self):
+    def test_cookie_second_line_empty_first_line(self):
         lines = (
-            b"#print('\xc2\xa3')\n",
+            b'\n',
             b'# vim: set fileencoding=iso8859-15 :\n',
             b"print('\xe2\x82\xac')\n"
         )
         encoding, consumed_lines = tokenize.detect_encoding(self.get_readline(lines))
         self.assertEqual(encoding, 'iso8859-15')
-        expected = [b"#print('\xc2\xa3')\n", b'# vim: set fileencoding=iso8859-15 :\n']
+        expected = [b'\n', b'# vim: set fileencoding=iso8859-15 :\n']
         self.assertEqual(consumed_lines, expected)
 
-    def test_cookie_second_line_empty_first_line(self):
+    def test_cookie_third_line(self):
         lines = (
-            b'\n',
-            b'# vim: set fileencoding=iso8859-15 :\n',
-            b"print('\xe2\x82\xac')\n"
+            b'#!/home/\xc3\xa4/bin/python\n',
+            b'# something\n',
+            b'# vim: set fileencoding=ascii :\n',
+            b'print(something)\n',
+            b'do_something(else)\n'
+        )
+        encoding, consumed_lines = tokenize.detect_encoding(self.get_readline(lines))
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, list(lines[:2]))
+
+    def test_double_coding_line(self):
+        # If the first line matches, the second line is ignored.
+        lines = (
+            b'#coding:iso8859-15\n',
+            b'#coding:latin1\n',
+            b'print(something)\n'
         )
         encoding, consumed_lines = tokenize.detect_encoding(self.get_readline(lines))
         self.assertEqual(encoding, 'iso8859-15')
-        expected = [b'\n', b'# vim: set fileencoding=iso8859-15 :\n']
-        self.assertEqual(consumed_lines, expected)
+        self.assertEqual(consumed_lines, list(lines[:1]))
+
+    def test_double_coding_same_line(self):
+        lines = (
+            b'#coding:iso8859-15 coding:latin1\n',
+            b'print(something)\n'
+        )
+        encoding, consumed_lines = tokenize.detect_encoding(self.get_readline(lines))
+        self.assertEqual(encoding, 'iso8859-15')
+        self.assertEqual(consumed_lines, list(lines[:1]))
+
+    def test_double_coding_utf8(self):
+        lines = (
+            b'#coding:utf-8\n',
+            b'#coding:latin1\n',
+            b'print(something)\n'
+        )
+        encoding, consumed_lines = tokenize.detect_encoding(self.get_readline(lines))
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, list(lines[:1]))
 
     def test_latin1_normalization(self):
         # See get_normal_name() in Parser/tokenizer/helpers.c.
@@ -1502,7 +1572,6 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline(lines)
         self.assertRaises(SyntaxError, tokenize.detect_encoding, readline)
 
-
     def test_utf8_normalization(self):
         # See get_normal_name() in Parser/tokenizer/helpers.c.
         encodings = ("utf-8", "utf-8-mac", "utf-8-unix")