git.ipfire.org Git - thirdparty/Python/cpython.git/commitdiff
Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script
now detect Python source code encoding only in comment lines.

author    Serhiy Storchaka <storchaka@gmail.com>  Mon, 16 Sep 2013 20:51:56 +0000 (23:51 +0300)
committer Serhiy Storchaka <storchaka@gmail.com>  Mon, 16 Sep 2013 20:51:56 +0000 (23:51 +0300)

Lib/idlelib/IOBinding.py
Lib/lib2to3/pgen2/tokenize.py
Lib/lib2to3/tests/data/false_encoding.py [new file with mode: 0644]
Lib/lib2to3/tests/test_refactor.py
Lib/test/test_importlib/source/test_source_encoding.py
Lib/test/test_tokenize.py
Lib/tokenize.py
Misc/NEWS
Tools/scripts/findnocoding.py
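
Every file touched here gets the same core change: the encoding-cookie regular
expression is anchored to comment lines and applied with match() instead of
search()/findall(). A minimal before/after sketch (the old_re/new_re names and
the sample lines are illustrative, not part of the patch):

    import re

    # Old pattern: searched anywhere in the line, so a string literal could be
    # mistaken for an encoding declaration.
    old_re = re.compile(r"coding[:=]\s*([-\w.]+)")
    # New pattern: the line must be a comment (optional whitespace, then '#'),
    # and it is applied with match() against the start of the line.
    new_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    line = 'print("#coding=fake")'
    print(old_re.findall(line))            # ['fake'] -- false positive before the patch
    print(new_re.match(line))              # None     -- ignored after the patch

    cookie = '# -*- coding: koi8-r -*-'
    print(new_re.match(cookie).group(1))   # 'koi8-r' -- real declarations still match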

diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
index 4558ae6c37826e63d88181268b2d9969127a7c49..cba80483a60332edf5a2413cdf82de74cd6d2d5e 100644 (file)
@@ -63,7 +63,7 @@ locale_encoding = locale_encoding.lower()
 encoding = locale_encoding  ### KBK 07Sep07  This is used all over IDLE, check!
                             ### 'encoding' is used below in encode(), check!
 
-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
+coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def coding_spec(data):
     """Return the encoding declaration according to PEP 263.
@@ -84,14 +84,16 @@ def coding_spec(data):
         lines = data
     # consider only the first two lines
     if '\n' in lines:
-        lst = lines.split('\n')[:2]
+        lst = lines.split('\n', 2)[:2]
     elif '\r' in lines:
-        lst = lines.split('\r')[:2]
+        lst = lines.split('\r', 2)[:2]
+    else:
+        lst = [lines]
+    for line in lst:
+        match = coding_re.match(line)
+        if match is not None:
+            break
     else:
-        lst = list(lines)
-    str = '\n'.join(lst)
-    match = coding_re.search(str)
-    if not match:
         return None
     name = match.group(1)
     try:
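
The rewritten coding_spec() now examines at most the first two lines and relies
on Python's for/else: the else clause runs only if no line matched. A simplified,
self-contained sketch of that control flow (it omits the '\r'-only and
bytes-decoding handling the real function keeps):

    import re

    coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    def coding_spec_sketch(source):
        # PEP 263: only the first two lines may carry an encoding declaration.
        lst = source.split('\n', 2)[:2]
        for line in lst:
            match = coding_re.match(line)
            if match is not None:
                break
        else:                       # no comment line declared an encoding
            return None
        return match.group(1)

    print(coding_spec_sketch('#!/usr/bin/env python\n# coding: latin-1\nx = 1\n'))  # 'latin-1'
    print(coding_spec_sketch('print("#coding=fake")\n'))                            # None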
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 31e29698e62f660a702cd4b2bd4293d7886e6cf3..83656fc19f443fac155f7074e83291d01d9b34a8 100644 (file)
@@ -236,7 +236,7 @@ class Untokenizer:
                 startline = False
             toks_append(tokval)
 
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def _get_normal_name(orig_enc):
     """Imitates get_normal_name in tokenizer.c."""
@@ -281,11 +281,10 @@ def detect_encoding(readline):
             line_string = line.decode('ascii')
         except UnicodeDecodeError:
             return None
-
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
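
lib2to3 carries its own copy of the detector, so 2to3 benefits in the same way;
the new false_encoding.py data file below exercises exactly this path. A small
usage sketch against the contemporary stdlib API (the sample source mirrors
that data file):

    import io
    from lib2to3.pgen2 import tokenize as pgen2_tokenize

    src = b"#!/usr/bin/env python\nprint '#coding=0'\n"
    encoding, lines = pgen2_tokenize.detect_encoding(io.BytesIO(src).readline)
    print(encoding)   # 'utf-8' -- the quoted "#coding=0" is no longer taken as a cookie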
diff --git a/Lib/lib2to3/tests/data/false_encoding.py b/Lib/lib2to3/tests/data/false_encoding.py
new file mode 100644 (file)
index 0000000..f4e59e7
--- /dev/null
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+print '#coding=0'
diff --git a/Lib/lib2to3/tests/test_refactor.py b/Lib/lib2to3/tests/test_refactor.py
index 8bdebc1f3da22852cd8ee64ee6a7c9982a3b467f..5ecd9b1cb3e186c3e93d3acafaa13dfe6f291595 100644 (file)
@@ -271,6 +271,10 @@ from __future__ import print_function"""
         fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
         self.check_file_refactoring(fn)
 
+    def test_false_file_encoding(self):
+        fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+        data = self.check_file_refactoring(fn)
+
     def test_bom(self):
         fn = os.path.join(TEST_DATA_DIR, "bom.py")
         data = self.check_file_refactoring(fn)
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
index 0ca51954390fb03cbe3432387c60ac1208f20af2..ba02b4427437288ec707747ae027e76f2479802d 100644 (file)
@@ -10,7 +10,7 @@ import unicodedata
 import unittest
 
 
-CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
+CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 
 class EncodingTest(unittest.TestCase):
@@ -41,7 +41,7 @@ class EncodingTest(unittest.TestCase):
 
     def create_source(self, encoding):
         encoding_line = "# coding={0}".format(encoding)
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source_lines = [encoding_line.encode('utf-8')]
         source_lines.append(self.source_line.encode(encoding))
         return b'\n'.join(source_lines)
@@ -50,7 +50,7 @@ class EncodingTest(unittest.TestCase):
         # Make sure that an encoding that has never been a standard one for
         # Python works.
         encoding_line = "# coding=koi8-r"
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
         self.run_test(source)
 
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index b4a58f0db26de4c0d1bf7082ce65c90b29d0decb..17650855eb33b5a702be8c614d3ecf7101a98fa5 100644 (file)
@@ -946,6 +946,13 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)
 
+    def test_false_encoding(self):
+        # Issue 18873: "Encoding" detected in non-comment lines
+        readline = self.get_readline((b'print("#coding=fake")',))
+        encoding, consumed_lines = detect_encoding(readline)
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
+
     def test_open(self):
         filename = support.TESTFN + '.py'
         self.addCleanup(support.unlink, filename)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index cbf91ef222c25fd684bce9b786ecc4f97132c07d..f1e61d8ad591b0ca6820cb94febb1c19403cb561 100644 (file)
@@ -31,7 +31,7 @@ from token import *
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 import token
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
@@ -372,10 +372,10 @@ def detect_encoding(readline):
                 msg = '{} for {!r}'.format(msg, filename)
             raise SyntaxError(msg)
 
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
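
For callers of tokenize.detect_encoding() the observable effect matches the new
test above: a cookie-looking string literal no longer overrides the default,
while genuine declarations are still honoured. A quick usage sketch (the sample
sources are illustrative):

    import io
    import tokenize

    def detected(source_bytes):
        encoding, lines = tokenize.detect_encoding(io.BytesIO(source_bytes).readline)
        return encoding

    print(detected(b'# -*- coding: iso-8859-1 -*-\npass\n'))   # 'iso-8859-1'
    print(detected(b'print("#coding=fake")\n'))                # 'utf-8' (the default)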
index f74cdb19d1eb077a37a2bbccacf00f009285aa29..25f395a4bf9172de269f905d6fa12fe9c27665f4 100644 (file)
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -68,6 +68,8 @@ Core and Builtins
 Library
 -------
 
+- Issue #18873: The tokenize module now detects Python source code encoding
+  only in comment lines.
 
 - Issue #17324: Fix http.server's request handling case on trailing '/'. Patch
   contributed by Vajrasky Kok.
@@ -304,6 +306,9 @@ C API
 IDLE
 ----
 
+- Issue #18873: IDLE now detects Python source code encoding only in comment
+  lines.
+
 - Issue #18988: The "Tab" key now works when a word is already autocompleted.
 
 - Issue #18489: Add tests for SearchEngine. Original patch by Phil Webster.
@@ -430,6 +435,9 @@ Documentation
 Tools/Demos
 -----------
 
+- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
+  code encoding only in comment lines.
+
 - Issue #18817: Fix a resource warning in Lib/aifc.py demo.
 
 - Issue #18439: Make patchcheck work on Windows for ACKS, NEWS.
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index b3e9dc7361091251bce9e9385b8a0668c89c17f0..c0997d6598e44c11ecb147d06984644e0e2cea86 100755 (executable)
@@ -32,13 +32,13 @@ except ImportError:
                          "no sophisticated Python source file search will be done.", file=sys.stderr)
 
 
-decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)")
+decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
 
 def get_declaration(line):
-    match = decl_re.search(line)
+    match = decl_re.match(line)
     if match:
         return match.group(1)
-    return ''
+    return b''
 
 def has_correct_encoding(text, codec):
     try:
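
findnocoding.py scans raw bytes, so its pattern stays a bytes regex and the
no-match sentinel becomes b'' rather than ''. A short sketch of the patched
helper in isolation (the sample lines are made up):

    import re

    decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')

    def get_declaration(line):
        """Return the declared codec name from a bytes line, or b'' if none."""
        match = decl_re.match(line)
        if match:
            return match.group(1)
        return b''

    print(get_declaration(b'# -*- coding: utf-8 -*-\n'))   # b'utf-8'
    print(get_declaration(b"print('#coding=0')\n"))        # b'' -- no false positive now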