[3.13] gh-128519: Align the docstring of untokenize() to match the docs (GH-128521...
author     Miss Islington (bot) <31488909+miss-islington@users.noreply.github.com>
           Mon, 6 Jan 2025 09:05:54 +0000 (10:05 +0100)
committer  GitHub <noreply@github.com>
           Mon, 6 Jan 2025 09:05:54 +0000 (09:05 +0000)
(cherry picked from commit aef52ca8b334ff90e8032da39f4d06e7b5130eb9)

Co-authored-by: Tomas R <tomas.roun8@gmail.com>
Lib/tokenize.py

index 4b4c3cfe16999b5231076748b19e514e79bb7eaf..430447d35e1adf0f3fc8daf4c15a90bcc039df10 100644
@@ -318,16 +318,10 @@ def untokenize(iterable):
     with at least two elements, a token number and token value.  If
     only two tokens are passed, the resulting output is poor.
 
-    Round-trip invariant for full input:
-        Untokenized source will match input source exactly
-
-    Round-trip invariant for limited input:
-        # Output bytes will tokenize back to the input
-        t1 = [tok[:2] for tok in tokenize(f.readline)]
-        newcode = untokenize(t1)
-        readline = BytesIO(newcode).readline
-        t2 = [tok[:2] for tok in tokenize(readline)]
-        assert t1 == t2
+    The result is guaranteed to tokenize back to match the input so
+    that the conversion is lossless and round-trips are assured.
+    The guarantee applies only to the token type and token string as
+    the spacing between tokens (column positions) may change.
     """
     ut = Untokenizer()
     out = ut.untokenize(iterable)
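
The snippet below is a minimal sketch (not part of this commit) of the limited round-trip invariant the new docstring wording describes, following the same pattern as the example code the patch removes: when only (type, string) pairs are passed to untokenize(), the regenerated source may be spaced differently, but it tokenizes back to the same (type, string) pairs.

    from io import BytesIO
    from tokenize import tokenize, untokenize

    source = b"x = (1 +  2)  # oddly  spaced\n"

    # Keep only token type and token string, dropping position info.
    t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
    newcode = untokenize(t1)

    # Re-tokenizing the output yields the same (type, string) pairs,
    # even though newcode need not equal source byte-for-byte,
    # since spacing between tokens (column positions) may change.
    t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
    assert t1 == t2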