     def untokenize(self, iterable):
         it = iter(iterable)
+        indents = []
+        startline = False
         for t in it:
             if len(t) == 2:
                 self.compat(t, it)
                 break
             tok_type, token, start, end, line = t
             if tok_type == ENDMARKER:
                 break
+            if tok_type == INDENT:
+                indents.append(token)
+                continue
+            elif tok_type == DEDENT:
+                indents.pop()
+                self.prev_row, self.prev_col = end
+                continue
+            elif tok_type in (NEWLINE, NL):
+                startline = True
+            elif startline and indents:
+                indent = indents[-1]
+                if start[1] >= len(indent):
+                    self.tokens.append(indent)
+                    self.prev_col = len(indent)
+                startline = False
             self.add_whitespace(start)
             self.tokens.append(token)
             self.prev_row, self.prev_col = end
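
For illustration only (not part of the patch), here is a minimal round-trip sketch of the behavior this hunk restores: because INDENT tokens are now replayed verbatim, tab-indented source keeps its tabs through tokenize/untokenize instead of having the indentation rewritten as spaces. It uses only public stdlib calls (tokenize.generate_tokens, tokenize.untokenize); the sample source string is made up.

    import io
    import tokenize

    # Hypothetical example: a tab-indented block.
    source = "if True:\n\tx = 1\n"

    # Full five-tuples let untokenize() reuse the INDENT token text,
    # so the tab indentation is emitted as-is.
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    result = tokenize.untokenize(tokens)

    assert "\t" in result   # the tab survives the round trip
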
Library
-------
+- Issue #20387: Restore semantic round-trip correctness in tokenize/untokenize
+  for tab-indented blocks.
+
- Issue #24456: Fixed possible buffer over-read in adpcm2lin() and lin2adpcm()
functions of the audioop module. Fixed SystemError when the state is not a
tuple. Fixed possible memory leak.