gh-104169: Refactor tokenizer into lexer and wrappers (#110684)
author    Lysandros Nikolaou <lisandrosnik@gmail.com>
          Wed, 11 Oct 2023 15:14:44 +0000 (17:14 +0200)
committer GitHub <noreply@github.com>
          Wed, 11 Oct 2023 15:14:44 +0000 (15:14 +0000)
* The lexer, which includes the actual lexeme-producing logic, goes into
  the `lexer` directory.
* The wrappers, one wrapper per input mode (file, string, utf-8, and
  readline), go into the `tokenizer` directory and include the logic for
  creating a lexer instance and managing the buffer for the different
  modes; a sketch of the interaction follows below.
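
A minimal sketch of how the two halves are meant to interact (names here
are hypothetical, not the commit's actual entry points):

    /* Sketch only: a wrapper owns input-mode setup, the lexer owns
       token production, independent of where the bytes came from. */
    static int
    tokenize_string(const char *src)
    {
        struct tok_state *tok = tokenizer_setup_for_string(src);  /* hypothetical wrapper */
        if (tok == NULL) {
            return -1;
        }
        struct token t;
        int type;
        do {
            type = lexer_next_token(tok, &t);  /* hypothetical lexer entry point */
        } while (type != ENDMARKER && type != ERRORTOKEN);
        tokenizer_free(tok);                   /* hypothetical */
        return type == ENDMARKER ? 0 : -1;
    }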
---------

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Co-authored-by: blurb-it[bot] <43283697+blurb-it[bot]@users.noreply.github.com>
29 files changed:
Makefile.pre.in
Misc/NEWS.d/next/Core and Builtins/2023-10-11-12-48-03.gh-issue-104169.bPoX8u.rst [new file with mode: 0644]
PCbuild/_freeze_module.vcxproj
PCbuild/_freeze_module.vcxproj.filters
PCbuild/pythoncore.vcxproj
PCbuild/pythoncore.vcxproj.filters
Parser/action_helpers.c
Parser/lexer/buffer.c [new file with mode: 0644]
Parser/lexer/buffer.h [new file with mode: 0644]
Parser/lexer/lexer.c [new file with mode: 0644]
Parser/lexer/lexer.h [new file with mode: 0644]
Parser/lexer/state.c [new file with mode: 0644]
Parser/lexer/state.h [moved from Parser/tokenizer.h with 85% similarity]
Parser/peg_api.c
Parser/pegen.c
Parser/pegen_errors.c
Parser/string_parser.c
Parser/tokenizer.c [deleted file]
Parser/tokenizer/file_tokenizer.c [new file with mode: 0644]
Parser/tokenizer/helpers.c [new file with mode: 0644]
Parser/tokenizer/helpers.h [new file with mode: 0644]
Parser/tokenizer/readline_tokenizer.c [new file with mode: 0644]
Parser/tokenizer/string_tokenizer.c [new file with mode: 0644]
Parser/tokenizer/tokenizer.h [new file with mode: 0644]
Parser/tokenizer/utf8_tokenizer.c [new file with mode: 0644]
Python/Python-tokenize.c
Tools/c-analyzer/cpython/ignored.tsv
configure
configure.ac

index f612f075df9abc2baa8381f981bd82abf756a221..f70c1122c82a01af593e696549fe8b02e01cd810 100644 (file)
@@ -347,20 +347,36 @@ PEGEN_OBJS=               \
                Parser/string_parser.o \
                Parser/peg_api.o
 
+TOKENIZER_OBJS=                \
+               Parser/lexer/buffer.o \
+               Parser/lexer/lexer.o \
+               Parser/lexer/state.o \
+               Parser/tokenizer/file_tokenizer.o \
+               Parser/tokenizer/readline_tokenizer.o \
+               Parser/tokenizer/string_tokenizer.o \
+               Parser/tokenizer/utf8_tokenizer.o \
+               Parser/tokenizer/helpers.o
 
 PEGEN_HEADERS= \
                $(srcdir)/Include/internal/pycore_parser.h \
                $(srcdir)/Parser/pegen.h \
                $(srcdir)/Parser/string_parser.h
 
+TOKENIZER_HEADERS= \
+               Parser/lexer/buffer.h \
+               Parser/lexer/lexer.h \
+               Parser/lexer/state.h \
+               Parser/tokenizer/tokenizer.h \
+               Parser/tokenizer/helpers.h
+
 POBJS=         \
                Parser/token.o \
 
-PARSER_OBJS=   $(POBJS) $(PEGEN_OBJS) Parser/myreadline.o Parser/tokenizer.o
+PARSER_OBJS=   $(POBJS) $(PEGEN_OBJS) $(TOKENIZER_OBJS) Parser/myreadline.o
 
 PARSER_HEADERS= \
                $(PEGEN_HEADERS) \
-               $(srcdir)/Parser/tokenizer.h
+               $(TOKENIZER_HEADERS)
 
 ##########################################################################
 # Python
@@ -1397,6 +1413,8 @@ regen-pegen-metaparser:
 .PHONY: regen-pegen
 regen-pegen:
        @$(MKDIR_P) $(srcdir)/Parser
+       @$(MKDIR_P) $(srcdir)/Parser/tokenizer
+       @$(MKDIR_P) $(srcdir)/Parser/lexer
        PYTHONPATH=$(srcdir)/Tools/peg_generator $(PYTHON_FOR_REGEN) -m pegen -q c \
                $(srcdir)/Grammar/python.gram \
                $(srcdir)/Grammar/Tokens \
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-10-11-12-48-03.gh-issue-104169.bPoX8u.rst b/Misc/NEWS.d/next/Core and Builtins/2023-10-11-12-48-03.gh-issue-104169.bPoX8u.rst
new file mode 100644 (file)
index 0000000..82ec9c1
--- /dev/null
@@ -0,0 +1,4 @@
+Split the tokenizer into two separate directories:
+- One part includes the actual lexeme producing logic and lives in ``Parser/lexer``.
+- The second part wraps the lexer according to the different tokenization modes
+  we have (string, utf-8, file, interactive, readline) and lives in ``Parser/tokenizer``.
index bdcf29ba44dab57f8d96e57baa0fa73e449a03fa..aaa63fe9456fb7ee487e5967971fe17282d935f0 100644 (file)
     <ClCompile Include="..\Parser\action_helpers.c" />
     <ClCompile Include="..\Parser\string_parser.c" />
     <ClCompile Include="..\Parser\token.c" />
-    <ClCompile Include="..\Parser\tokenizer.c" />
+    <ClCompile Include="..\Parser\lexer\buffer.c" />
+    <ClCompile Include="..\Parser\lexer\state.c" />
+    <ClCompile Include="..\Parser\lexer\lexer.c" />
+    <ClCompile Include="..\Parser\tokenizer\string_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\file_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\utf8_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\readline_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\helpers.c" />
     <ClCompile Include="..\PC\invalid_parameter_handler.c" />
     <ClCompile Include="..\PC\msvcrtmodule.c" />
     <ClCompile Include="..\PC\winreg.c" />
index 45333fa97f1c64f5270d4b898a54e446a4659c2a..279736b0fbbb13cf4047af52e86e972fe652dd36 100644 (file)
     <ClCompile Include="..\Parser\token.c">
       <Filter>Source Files</Filter>
     </ClCompile>
-    <ClCompile Include="..\Parser\tokenizer.c">
+    <ClCompile Include="..\Parser\lexer\lexer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\lexer\buffer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\lexer\state.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\string_tokenizer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\utf8_tokenizer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\file_tokenizer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\readline_tokenizer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\helpers.c">
       <Filter>Source Files</Filter>
     </ClCompile>
     <ClCompile Include="..\Python\traceback.c">
index 43a79fd5938486c8599d5a0093beebe3203b38ca..f240779d2a5d4e89d459f6bd3f625c98622166ce 100644 (file)
     <ClInclude Include="..\Objects\stringlib\replace.h" />
     <ClInclude Include="..\Objects\stringlib\split.h" />
     <ClInclude Include="..\Objects\unicodetype_db.h" />
-    <ClInclude Include="..\Parser\tokenizer.h" />
+    <ClInclude Include="..\Parser\lexer\state.h" />
+    <ClInclude Include="..\Parser\lexer\lexer.h" />
+    <ClInclude Include="..\Parser\lexer\buffer.h" />
+    <ClInclude Include="..\Parser\tokenizer\helpers.h" />
+    <ClInclude Include="..\Parser\tokenizer\tokenizer.h" />
     <ClInclude Include="..\Parser\string_parser.h" />
     <ClInclude Include="..\Parser\pegen.h" />
     <ClInclude Include="..\PC\errmap.h" />
     <ClCompile Include="..\Objects\unionobject.c" />
     <ClCompile Include="..\Objects\weakrefobject.c" />
     <ClCompile Include="..\Parser\myreadline.c" />
-    <ClCompile Include="..\Parser\tokenizer.c" />
+    <ClCompile Include="..\Parser\lexer\state.c" />
+    <ClCompile Include="..\Parser\lexer\lexer.c" />
+    <ClCompile Include="..\Parser\lexer\buffer.c" />
+    <ClCompile Include="..\Parser\tokenizer\string_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\file_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\utf8_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\readline_tokenizer.c" />
+    <ClCompile Include="..\Parser\tokenizer\helpers.c" />
     <ClCompile Include="..\Parser\token.c" />
     <ClCompile Include="..\Parser\pegen.c" />
     <ClCompile Include="..\Parser\pegen_errors.c" />
index 59159ed609968b84e09523049c64bc43a0629718..6e13eb81018f9b476b4953e7b331b56f4a809e11 100644 (file)
     <ClInclude Include="..\Objects\unicodetype_db.h">
       <Filter>Objects</Filter>
     </ClInclude>
-    <ClInclude Include="..\Parser\tokenizer.h">
+    <ClInclude Include="..\Parser\lexer\lexer.h">
+      <Filter>Parser</Filter>
+    </ClInclude>
+    <ClInclude Include="..\Parser\lexer\state.h">
+      <Filter>Parser</Filter>
+    </ClInclude>
+    <ClInclude Include="..\Parser\lexer\buffer.h">
+      <Filter>Parser</Filter>
+    </ClInclude>
+    <ClInclude Include="..\Parser\tokenizer\tokenizer.h">
+      <Filter>Parser</Filter>
+    </ClInclude>
+    <ClInclude Include="..\Parser\tokenizer\helpers.h">
       <Filter>Parser</Filter>
     </ClInclude>
     <ClInclude Include="..\PC\errmap.h">
     <ClCompile Include="..\Parser\myreadline.c">
       <Filter>Parser</Filter>
     </ClCompile>
-    <ClCompile Include="..\Parser\tokenizer.c">
+    <ClCompile Include="..\Parser\lexer\lexer.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\lexer\state.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\lexer\buffer.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\string_tokenizer.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\file_tokenizer.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\utf8_tokenizer.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\readline_tokenizer.c">
+      <Filter>Parser</Filter>
+    </ClCompile>
+    <ClCompile Include="..\Parser\tokenizer\helpers.c">
       <Filter>Parser</Filter>
     </ClCompile>
     <ClCompile Include="..\Parser\token.c">
index b8713a329d4ef64d11f06d415314dd7c5aef3766..fb2d56af3fc3723a43e314c6bf1932a56231ac9c 100644 (file)
@@ -1,7 +1,6 @@
 #include <Python.h>
 
 #include "pegen.h"
-#include "tokenizer.h"
 #include "string_parser.h"
 #include "pycore_runtime.h"         // _PyRuntime
 
diff --git a/Parser/lexer/buffer.c b/Parser/lexer/buffer.c
new file mode 100644 (file)
index 0000000..f6502bf
--- /dev/null
@@ -0,0 +1,76 @@
+#include "Python.h"
+#include "errcode.h"
+
+#include "state.h"
+
+/* Traverse and remember all f-string buffers, in order to be able to restore
+   them after reallocating tok->buf */
+void
+_PyLexer_remember_fstring_buffers(struct tok_state *tok)
+{
+    int index;
+    tokenizer_mode *mode;
+
+    for (index = tok->tok_mode_stack_index; index >= 0; --index) {
+        mode = &(tok->tok_mode_stack[index]);
+        mode->f_string_start_offset = mode->f_string_start - tok->buf;
+        mode->f_string_multi_line_start_offset = mode->f_string_multi_line_start - tok->buf;
+    }
+}
+
+/* Traverse and restore all f-string buffers after reallocating tok->buf */
+void
+_PyLexer_restore_fstring_buffers(struct tok_state *tok)
+{
+    int index;
+    tokenizer_mode *mode;
+
+    for (index = tok->tok_mode_stack_index; index >= 0; --index) {
+        mode = &(tok->tok_mode_stack[index]);
+        mode->f_string_start = tok->buf + mode->f_string_start_offset;
+        mode->f_string_multi_line_start = tok->buf + mode->f_string_multi_line_start_offset;
+    }
+}
+
+/* Read a line of text from TOK into S, using the stream in TOK.
+   Return NULL on failure, else S.
+
+   On entry, tok->decoding_buffer will be one of:
+     1) NULL: need to call tok->decoding_readline to get a new line
+     2) PyUnicodeObject *: decoding_feof has called tok->decoding_readline and
+       stored the result in tok->decoding_buffer
+     3) PyByteArrayObject *: previous call to tok_readline_recode did not have enough room
+       (in the s buffer) to copy entire contents of the line read
+       by tok->decoding_readline.  tok->decoding_buffer has the overflow.
+       In this case, tok_readline_recode is called in a loop (with an expanded buffer)
+       until the buffer ends with a '\n' (or until the end of the file is
+       reached): see tok_nextc and its calls to tok_reserve_buf.
+*/
+int
+_PyLexer_tok_reserve_buf(struct tok_state *tok, Py_ssize_t size)
+{
+    Py_ssize_t cur = tok->cur - tok->buf;
+    Py_ssize_t oldsize = tok->inp - tok->buf;
+    Py_ssize_t newsize = oldsize + Py_MAX(size, oldsize >> 1);
+    if (newsize > tok->end - tok->buf) {
+        char *newbuf = tok->buf;
+        Py_ssize_t start = tok->start == NULL ? -1 : tok->start - tok->buf;
+        Py_ssize_t line_start = tok->start == NULL ? -1 : tok->line_start - tok->buf;
+        Py_ssize_t multi_line_start = tok->multi_line_start - tok->buf;
+        _PyLexer_remember_fstring_buffers(tok);
+        newbuf = (char *)PyMem_Realloc(newbuf, newsize);
+        if (newbuf == NULL) {
+            tok->done = E_NOMEM;
+            return 0;
+        }
+        tok->buf = newbuf;
+        tok->cur = tok->buf + cur;
+        tok->inp = tok->buf + oldsize;
+        tok->end = tok->buf + newsize;
+        tok->start = start < 0 ? NULL : tok->buf + start;
+        tok->line_start = line_start < 0 ? NULL : tok->buf + line_start;
+        tok->multi_line_start = multi_line_start < 0 ? NULL : tok->buf + multi_line_start;
+        _PyLexer_restore_fstring_buffers(tok);
+    }
+    return 1;
+}
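
The remember/restore pair and the offset arithmetic above exist because
PyMem_Realloc may move the buffer, invalidating every raw pointer into it.
The same save-offsets/realloc/rebase pattern in isolation (illustrative C,
not CPython API):

    #include <stddef.h>
    #include <stdlib.h>

    /* Grow *buf to newsize while keeping *cursor at the same logical
       position. Offsets survive a realloc; raw pointers may not. */
    static int
    grow_keeping_cursor(char **buf, char **cursor, size_t newsize)
    {
        ptrdiff_t off = *cursor - *buf;      /* 1. remember as an offset */
        char *nb = realloc(*buf, newsize);   /* 2. the block may move */
        if (nb == NULL) {
            return 0;                        /* old buffer is still valid */
        }
        *buf = nb;
        *cursor = nb + off;                  /* 3. rebase from the offset */
        return 1;
    }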
diff --git a/Parser/lexer/buffer.h b/Parser/lexer/buffer.h
new file mode 100644 (file)
index 0000000..bb21816
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _LEXER_BUFFER_H_
+#define _LEXER_BUFFER_H_
+
+#include "pyport.h"
+
+void _PyLexer_remember_fstring_buffers(struct tok_state *tok);
+void _PyLexer_restore_fstring_buffers(struct tok_state *tok);
+int _PyLexer_tok_reserve_buf(struct tok_state *tok, Py_ssize_t size);
+
+#endif
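
To show the intended calling pattern of this header: reserve first, then
write at tok->inp and advance it. The helper below is illustrative, not
part of the commit, and assumes the include paths of the new layout:

    #include <string.h>
    #include "lexer/state.h"    /* struct tok_state (assumed path) */
    #include "lexer/buffer.h"

    /* Illustrative only: append n bytes of already-decoded input. */
    static int
    append_chunk(struct tok_state *tok, const char *chunk, Py_ssize_t n)
    {
        if (!_PyLexer_tok_reserve_buf(tok, n)) {
            return 0;            /* tok->done is already E_NOMEM */
        }
        memcpy(tok->inp, chunk, (size_t)n);
        tok->inp += n;           /* tok->inp marks the end of valid data */
        return 1;
    }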
diff --git a/Parser/lexer/lexer.c b/Parser/lexer/lexer.c
new file mode 100644 (file)
index 0000000..c7134ab
--- /dev/null
@@ -0,0 +1,1419 @@
+#include "Python.h"
+#include "pycore_token.h"
+#include "pycore_unicodeobject.h"
+#include "errcode.h"
+
+#include "state.h"
+#include "../tokenizer/helpers.h"
+
+/* Alternate tab spacing */
+#define ALTTABSIZE 1
+
+#define is_potential_identifier_start(c) (\
+              (c >= 'a' && c <= 'z')\
+               || (c >= 'A' && c <= 'Z')\
+               || c == '_'\
+               || (c >= 128))
+
+#define is_potential_identifier_char(c) (\
+              (c >= 'a' && c <= 'z')\
+               || (c >= 'A' && c <= 'Z')\
+               || (c >= '0' && c <= '9')\
+               || c == '_'\
+               || (c >= 128))
+
+#ifdef Py_DEBUG
+static inline tokenizer_mode* TOK_GET_MODE(struct tok_state* tok) {
+    assert(tok->tok_mode_stack_index >= 0);
+    assert(tok->tok_mode_stack_index < MAXFSTRINGLEVEL);
+    return &(tok->tok_mode_stack[tok->tok_mode_stack_index]);
+}
+static inline tokenizer_mode* TOK_NEXT_MODE(struct tok_state* tok) {
+    assert(tok->tok_mode_stack_index >= 0);
+    assert(tok->tok_mode_stack_index + 1 < MAXFSTRINGLEVEL);
+    return &(tok->tok_mode_stack[++tok->tok_mode_stack_index]);
+}
+#else
+#define TOK_GET_MODE(tok) (&(tok->tok_mode_stack[tok->tok_mode_stack_index]))
+#define TOK_NEXT_MODE(tok) (&(tok->tok_mode_stack[++tok->tok_mode_stack_index]))
+#endif
+
+#define MAKE_TOKEN(token_type) _PyLexer_token_setup(tok, token, token_type, p_start, p_end)
+#define MAKE_TYPE_COMMENT_TOKEN(token_type, col_offset, end_col_offset) (\
+                _PyLexer_type_comment_token_setup(tok, token, token_type, col_offset, end_col_offset, p_start, p_end))
+
+/* Spaces in this constant are treated as "zero or more spaces or tabs" when
+   tokenizing. */
+static const char* type_comment_prefix = "# type: ";
+
+static inline int
+contains_null_bytes(const char* str, size_t size)
+{
+    return memchr(str, 0, size) != NULL;
+}
+
+/* Get next char, updating state; error code goes into tok->done */
+static int
+tok_nextc(struct tok_state *tok)
+{
+    int rc;
+    for (;;) {
+        if (tok->cur != tok->inp) {
+            tok->col_offset++;
+            return Py_CHARMASK(*tok->cur++); /* Fast path */
+        }
+        if (tok->done != E_OK) {
+            return EOF;
+        }
+        rc = tok->underflow(tok);
+#if defined(Py_DEBUG)
+        if (tok->debug) {
+            fprintf(stderr, "line[%d] = ", tok->lineno);
+            _PyTokenizer_print_escape(stderr, tok->cur, tok->inp - tok->cur);
+            fprintf(stderr, "  tok->done = %d\n", tok->done);
+        }
+#endif
+        if (!rc) {
+            tok->cur = tok->inp;
+            return EOF;
+        }
+        tok->line_start = tok->cur;
+
+        if (contains_null_bytes(tok->line_start, tok->inp - tok->line_start)) {
+            _PyTokenizer_syntaxerror(tok, "source code cannot contain null bytes");
+            tok->cur = tok->inp;
+            return EOF;
+        }
+    }
+    Py_UNREACHABLE();
+}
+
+/* Back-up one character */
+static void
+tok_backup(struct tok_state *tok, int c)
+{
+    if (c != EOF) {
+        if (--tok->cur < tok->buf) {
+            Py_FatalError("tokenizer beginning of buffer");
+        }
+        if ((int)(unsigned char)*tok->cur != Py_CHARMASK(c)) {
+            Py_FatalError("tok_backup: wrong character");
+        }
+        tok->col_offset--;
+    }
+}
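
tok_nextc and tok_backup form a one-character pushback pair; the rest of
this file expresses lookahead as read-then-backup. The recurring idiom as
a standalone sketch (hypothetical helper, not in the diff):

    /* Peek at the next character without consuming it (sketch). */
    static int
    tok_peek(struct tok_state *tok)
    {
        int c = tok_nextc(tok);  /* consume one character */
        tok_backup(tok, c);      /* push it back; EOF backs up nothing */
        return c;
    }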
+
+static int
+set_fstring_expr(struct tok_state* tok, struct token *token, char c) {
+    assert(token != NULL);
+    assert(c == '}' || c == ':' || c == '!');
+    tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
+
+    if (!tok_mode->f_string_debug || token->metadata) {
+        return 0;
+    }
+
+    PyObject *res = PyUnicode_DecodeUTF8(
+        tok_mode->last_expr_buffer,
+        tok_mode->last_expr_size - tok_mode->last_expr_end,
+        NULL
+    );
+    if (!res) {
+        return -1;
+    }
+    token->metadata = res;
+    return 0;
+}
+
+int
+_PyLexer_update_fstring_expr(struct tok_state *tok, char cur)
+{
+    assert(tok->cur != NULL);
+
+    Py_ssize_t size = strlen(tok->cur);
+    tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
+
+    switch (cur) {
+        case 0:
+            if (!tok_mode->last_expr_buffer || tok_mode->last_expr_end >= 0) {
+                return 1;
+            }
+            char *new_buffer = PyMem_Realloc(
+                tok_mode->last_expr_buffer,
+                tok_mode->last_expr_size + size
+            );
+            if (new_buffer == NULL) {
+                PyMem_Free(tok_mode->last_expr_buffer);
+                goto error;
+            }
+            tok_mode->last_expr_buffer = new_buffer;
+            strncpy(tok_mode->last_expr_buffer + tok_mode->last_expr_size, tok->cur, size);
+            tok_mode->last_expr_size += size;
+            break;
+        case '{':
+            if (tok_mode->last_expr_buffer != NULL) {
+                PyMem_Free(tok_mode->last_expr_buffer);
+            }
+            tok_mode->last_expr_buffer = PyMem_Malloc(size);
+            if (tok_mode->last_expr_buffer == NULL) {
+                goto error;
+            }
+            tok_mode->last_expr_size = size;
+            tok_mode->last_expr_end = -1;
+            strncpy(tok_mode->last_expr_buffer, tok->cur, size);
+            break;
+        case '}':
+        case '!':
+        case ':':
+            if (tok_mode->last_expr_end == -1) {
+                tok_mode->last_expr_end = strlen(tok->start);
+            }
+            break;
+        default:
+            Py_UNREACHABLE();
+    }
+    return 1;
+error:
+    tok->done = E_NOMEM;
+    return 0;
+}
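
A worked example of the bookkeeping above, as far as the shown code allows:
while lexing f"{a + b!r}", the '{' case snapshots the rest of the line and
the '!' case records where the expression text stops.

    /* Illustrative trace for f"{a + b!r}" (per the cases above):
     *   on '{': last_expr_buffer <- copy of tok->cur (rest of the line),
     *           last_expr_size   <- its length, last_expr_end <- -1
     *   on '!': last_expr_end    <- strlen(tok->start), the length of the
     *           unconsumed tail that starts at '!'
     *   set_fstring_expr() then decodes last_expr_size - last_expr_end
     *   bytes, recovering the expression text "a + b" for f-string debug.
     */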
+
+static int
+lookahead(struct tok_state *tok, const char *test)
+{
+    const char *s = test;
+    int res = 0;
+    while (1) {
+        int c = tok_nextc(tok);
+        if (*s == 0) {
+            res = !is_potential_identifier_char(c);
+        }
+        else if (c == *s) {
+            s++;
+            continue;
+        }
+
+        tok_backup(tok, c);
+        while (s != test) {
+            tok_backup(tok, *--s);
+        }
+        return res;
+    }
+}
+
+static int
+verify_end_of_number(struct tok_state *tok, int c, const char *kind) {
+    if (tok->tok_extra_tokens) {
+        // When we are parsing extra tokens, we don't want to emit warnings
+        // about invalid literals, because we want to be a bit more liberal.
+        return 1;
+    }
+    /* Emit a deprecation warning only if the numeric literal is immediately
+     * followed by one of the keywords which can occur after a numeric literal
+     * in valid code: "and", "else", "for", "if", "in", "is" and "or".
+     * This allows us to gradually deprecate existing valid code without adding
+     * a warning before the error in most cases of invalid numeric literals
+     * (which would be confusing and break existing tests).
+     * Raise a syntax error with a slightly better message than plain
+     * "invalid syntax" if the numeric literal is immediately followed by
+     * another keyword or identifier.
+     */
+    int r = 0;
+    if (c == 'a') {
+        r = lookahead(tok, "nd");
+    }
+    else if (c == 'e') {
+        r = lookahead(tok, "lse");
+    }
+    else if (c == 'f') {
+        r = lookahead(tok, "or");
+    }
+    else if (c == 'i') {
+        int c2 = tok_nextc(tok);
+        if (c2 == 'f' || c2 == 'n' || c2 == 's') {
+            r = 1;
+        }
+        tok_backup(tok, c2);
+    }
+    else if (c == 'o') {
+        r = lookahead(tok, "r");
+    }
+    else if (c == 'n') {
+        r = lookahead(tok, "ot");
+    }
+    if (r) {
+        tok_backup(tok, c);
+        if (_PyTokenizer_parser_warn(tok, PyExc_SyntaxWarning,
+                "invalid %s literal", kind))
+        {
+            return 0;
+        }
+        tok_nextc(tok);
+    }
+    else /* In future releases, only error will remain. */
+    if (c < 128 && is_potential_identifier_char(c)) {
+        tok_backup(tok, c);
+        _PyTokenizer_syntaxerror(tok, "invalid %s literal", kind);
+        return 0;
+    }
+    return 1;
+}
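
Two inputs that exercise the two paths above (illustrative):

    /* "0in x"  -> tok_backup + SyntaxWarning "invalid decimal literal":
     *             'in' can legally follow a number, so only warn for now.
     * "0spam"  -> SyntaxError "invalid decimal literal": 's' starts an
     *             identifier and no keyword lookahead matches it.
     */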
+
+/* Verify that the identifier follows PEP 3131.
+   All identifier strings are guaranteed to be "ready" unicode objects.
+ */
+static int
+verify_identifier(struct tok_state *tok)
+{
+    if (tok->tok_extra_tokens) {
+        return 1;
+    }
+    PyObject *s;
+    if (tok->decoding_erred)
+        return 0;
+    s = PyUnicode_DecodeUTF8(tok->start, tok->cur - tok->start, NULL);
+    if (s == NULL) {
+        if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
+            tok->done = E_DECODE;
+        }
+        else {
+            tok->done = E_ERROR;
+        }
+        return 0;
+    }
+    Py_ssize_t invalid = _PyUnicode_ScanIdentifier(s);
+    if (invalid < 0) {
+        Py_DECREF(s);
+        tok->done = E_ERROR;
+        return 0;
+    }
+    assert(PyUnicode_GET_LENGTH(s) > 0);
+    if (invalid < PyUnicode_GET_LENGTH(s)) {
+        Py_UCS4 ch = PyUnicode_READ_CHAR(s, invalid);
+        if (invalid + 1 < PyUnicode_GET_LENGTH(s)) {
+            /* Determine the offset in UTF-8 encoded input */
+            Py_SETREF(s, PyUnicode_Substring(s, 0, invalid + 1));
+            if (s != NULL) {
+                Py_SETREF(s, PyUnicode_AsUTF8String(s));
+            }
+            if (s == NULL) {
+                tok->done = E_ERROR;
+                return 0;
+            }
+            tok->cur = (char *)tok->start + PyBytes_GET_SIZE(s);
+        }
+        Py_DECREF(s);
+        if (Py_UNICODE_ISPRINTABLE(ch)) {
+            _PyTokenizer_syntaxerror(tok, "invalid character '%c' (U+%04X)", ch, ch);
+        }
+        else {
+            _PyTokenizer_syntaxerror(tok, "invalid non-printable character U+%04X", ch);
+        }
+        return 0;
+    }
+    Py_DECREF(s);
+    return 1;
+}
+
+static int
+tok_decimal_tail(struct tok_state *tok)
+{
+    int c;
+
+    while (1) {
+        do {
+            c = tok_nextc(tok);
+        } while (Py_ISDIGIT(c));
+        if (c != '_') {
+            break;
+        }
+        c = tok_nextc(tok);
+        if (!Py_ISDIGIT(c)) {
+            tok_backup(tok, c);
+            _PyTokenizer_syntaxerror(tok, "invalid decimal literal");
+            return 0;
+        }
+    }
+    return c;
+}
+
+static inline int
+tok_continuation_line(struct tok_state *tok) {
+    int c = tok_nextc(tok);
+    if (c == '\r') {
+        c = tok_nextc(tok);
+    }
+    if (c != '\n') {
+        tok->done = E_LINECONT;
+        return -1;
+    }
+    c = tok_nextc(tok);
+    if (c == EOF) {
+        tok->done = E_EOF;
+        tok->cur = tok->inp;
+        return -1;
+    } else {
+        tok_backup(tok, c);
+    }
+    return c;
+}
+
+static int
+tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
+{
+    int c;
+    int blankline, nonascii;
+
+    const char *p_start = NULL;
+    const char *p_end = NULL;
+  nextline:
+    tok->start = NULL;
+    tok->starting_col_offset = -1;
+    blankline = 0;
+
+
+    /* Get indentation level */
+    if (tok->atbol) {
+        int col = 0;
+        int altcol = 0;
+        tok->atbol = 0;
+        int cont_line_col = 0;
+        for (;;) {
+            c = tok_nextc(tok);
+            if (c == ' ') {
+                col++, altcol++;
+            }
+            else if (c == '\t') {
+                col = (col / tok->tabsize + 1) * tok->tabsize;
+                altcol = (altcol / ALTTABSIZE + 1) * ALTTABSIZE;
+            }
+            else if (c == '\014')  {/* Control-L (formfeed) */
+                col = altcol = 0; /* For Emacs users */
+            }
+            else if (c == '\\') {
+                // Indentation cannot be split over multiple physical lines
+                // using backslashes. This means that if we found a backslash
+                // preceded by whitespace, **the first one we find** determines
+                // the level of indentation of whatever comes next.
+                cont_line_col = cont_line_col ? cont_line_col : col;
+                if ((c = tok_continuation_line(tok)) == -1) {
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+            else {
+                break;
+            }
+        }
+        tok_backup(tok, c);
+        if (c == '#' || c == '\n' || c == '\r') {
+            /* Lines with only whitespace and/or comments
+               shouldn't affect the indentation and are
+               not passed to the parser as NEWLINE tokens,
+               except *totally* empty lines in interactive
+               mode, which signal the end of a command group. */
+            if (col == 0 && c == '\n' && tok->prompt != NULL) {
+                blankline = 0; /* Let it through */
+            }
+            else if (tok->prompt != NULL && tok->lineno == 1) {
+                /* In interactive mode, if the first line contains
+                   only spaces and/or a comment, let it through. */
+                blankline = 0;
+                col = altcol = 0;
+            }
+            else {
+                blankline = 1; /* Ignore completely */
+            }
+            /* We can't jump back right here since we still
+               may need to skip to the end of a comment */
+        }
+        if (!blankline && tok->level == 0) {
+            col = cont_line_col ? cont_line_col : col;
+            altcol = cont_line_col ? cont_line_col : altcol;
+            if (col == tok->indstack[tok->indent]) {
+                /* No change */
+                if (altcol != tok->altindstack[tok->indent]) {
+                    return MAKE_TOKEN(_PyTokenizer_indenterror(tok));
+                }
+            }
+            else if (col > tok->indstack[tok->indent]) {
+                /* Indent -- always one */
+                if (tok->indent+1 >= MAXINDENT) {
+                    tok->done = E_TOODEEP;
+                    tok->cur = tok->inp;
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+                if (altcol <= tok->altindstack[tok->indent]) {
+                    return MAKE_TOKEN(_PyTokenizer_indenterror(tok));
+                }
+                tok->pendin++;
+                tok->indstack[++tok->indent] = col;
+                tok->altindstack[tok->indent] = altcol;
+            }
+            else /* col < tok->indstack[tok->indent] */ {
+                /* Dedent -- any number, must be consistent */
+                while (tok->indent > 0 &&
+                    col < tok->indstack[tok->indent]) {
+                    tok->pendin--;
+                    tok->indent--;
+                }
+                if (col != tok->indstack[tok->indent]) {
+                    tok->done = E_DEDENT;
+                    tok->cur = tok->inp;
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+                if (altcol != tok->altindstack[tok->indent]) {
+                    return MAKE_TOKEN(_PyTokenizer_indenterror(tok));
+                }
+            }
+        }
+    }
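    /* Illustrative: with tabsize=8, a line starting "\t " yields col=9 but
     * altcol=2 (ALTTABSIZE is 1), while a line of 9 spaces yields col=9 and
     * altcol=9. Equal col with different altcol across lines is exactly the
     * inconsistent tab/space mix the indenterror branches above reject. */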
+
+    tok->start = tok->cur;
+    tok->starting_col_offset = tok->col_offset;
+
+    /* Return pending indents/dedents */
+    if (tok->pendin != 0) {
+        if (tok->pendin < 0) {
+            if (tok->tok_extra_tokens) {
+                p_start = tok->cur;
+                p_end = tok->cur;
+            }
+            tok->pendin++;
+            return MAKE_TOKEN(DEDENT);
+        }
+        else {
+            if (tok->tok_extra_tokens) {
+                p_start = tok->buf;
+                p_end = tok->cur;
+            }
+            tok->pendin--;
+            return MAKE_TOKEN(INDENT);
+        }
+    }
+
+    /* Peek ahead at the next character */
+    c = tok_nextc(tok);
+    tok_backup(tok, c);
+
+ again:
+    tok->start = NULL;
+    /* Skip spaces */
+    do {
+        c = tok_nextc(tok);
+    } while (c == ' ' || c == '\t' || c == '\014');
+
+    /* Set start of current token */
+    tok->start = tok->cur == NULL ? NULL : tok->cur - 1;
+    tok->starting_col_offset = tok->col_offset - 1;
+
+    /* Skip comment, unless it's a type comment */
+    if (c == '#') {
+
+        const char* p = NULL;
+        const char *prefix, *type_start;
+        int current_starting_col_offset;
+
+        while (c != EOF && c != '\n' && c != '\r') {
+            c = tok_nextc(tok);
+        }
+
+        if (tok->tok_extra_tokens) {
+            p = tok->start;
+        }
+
+        if (tok->type_comments) {
+            p = tok->start;
+            current_starting_col_offset = tok->starting_col_offset;
+            prefix = type_comment_prefix;
+            while (*prefix && p < tok->cur) {
+                if (*prefix == ' ') {
+                    while (*p == ' ' || *p == '\t') {
+                        p++;
+                        current_starting_col_offset++;
+                    }
+                } else if (*prefix == *p) {
+                    p++;
+                    current_starting_col_offset++;
+                } else {
+                    break;
+                }
+
+                prefix++;
+            }
+
+            /* This is a type comment if we matched all of type_comment_prefix. */
+            if (!*prefix) {
+                int is_type_ignore = 1;
+                // +6 in order to skip the word 'ignore'
+                const char *ignore_end = p + 6;
+                const int ignore_end_col_offset = current_starting_col_offset + 6;
+                tok_backup(tok, c);  /* don't eat the newline or EOF */
+
+                type_start = p;
+
+                /* A TYPE_IGNORE is "type: ignore" followed by the end of the token
+                 * or anything ASCII and non-alphanumeric. */
+                is_type_ignore = (
+                    tok->cur >= ignore_end && memcmp(p, "ignore", 6) == 0
+                    && !(tok->cur > ignore_end
+                         && ((unsigned char)ignore_end[0] >= 128 || Py_ISALNUM(ignore_end[0]))));
+
+                if (is_type_ignore) {
+                    p_start = ignore_end;
+                    p_end = tok->cur;
+
+                    /* If this type ignore is the only thing on the line, consume the newline also. */
+                    if (blankline) {
+                        tok_nextc(tok);
+                        tok->atbol = 1;
+                    }
+                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_IGNORE, ignore_end_col_offset, tok->col_offset);
+                } else {
+                    p_start = type_start;
+                    p_end = tok->cur;
+                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_COMMENT, current_starting_col_offset, tok->col_offset);
+                }
+            }
+        }
+        if (tok->tok_extra_tokens) {
+            tok_backup(tok, c);  /* don't eat the newline or EOF */
+            p_start = p;
+            p_end = tok->cur;
+            tok->comment_newline = blankline;
+            return MAKE_TOKEN(COMMENT);
+        }
+    }
+
+    if (tok->done == E_INTERACT_STOP) {
+        return MAKE_TOKEN(ENDMARKER);
+    }
+
+    /* Check for EOF and errors now */
+    if (c == EOF) {
+        if (tok->level) {
+            return MAKE_TOKEN(ERRORTOKEN);
+        }
+        return MAKE_TOKEN(tok->done == E_EOF ? ENDMARKER : ERRORTOKEN);
+    }
+
+    /* Identifier (most frequent token!) */
+    nonascii = 0;
+    if (is_potential_identifier_start(c)) {
+        /* Process the various legal combinations of b"", r"", u"", and f"". */
+        int saw_b = 0, saw_r = 0, saw_u = 0, saw_f = 0;
+        while (1) {
+            if (!(saw_b || saw_u || saw_f) && (c == 'b' || c == 'B'))
+                saw_b = 1;
+            /* Since this is a backwards compatibility support literal we don't
+               want to support it in arbitrary order like byte literals. */
+            else if (!(saw_b || saw_u || saw_r || saw_f)
+                     && (c == 'u'|| c == 'U')) {
+                saw_u = 1;
+            }
+            /* ur"" and ru"" are not supported */
+            else if (!(saw_r || saw_u) && (c == 'r' || c == 'R')) {
+                saw_r = 1;
+            }
+            else if (!(saw_f || saw_b || saw_u) && (c == 'f' || c == 'F')) {
+                saw_f = 1;
+            }
+            else {
+                break;
+            }
+            c = tok_nextc(tok);
+            if (c == '"' || c == '\'') {
+                if (saw_f) {
+                    goto f_string_quote;
+                }
+                goto letter_quote;
+            }
+        }
+        while (is_potential_identifier_char(c)) {
+            if (c >= 128) {
+                nonascii = 1;
+            }
+            c = tok_nextc(tok);
+        }
+        tok_backup(tok, c);
+        if (nonascii && !verify_identifier(tok)) {
+            return MAKE_TOKEN(ERRORTOKEN);
+        }
+
+        p_start = tok->start;
+        p_end = tok->cur;
+
+        return MAKE_TOKEN(NAME);
+    }
+
+    if (c == '\r') {
+        c = tok_nextc(tok);
+    }
+
+    /* Newline */
+    if (c == '\n') {
+        tok->atbol = 1;
+        if (blankline || tok->level > 0) {
+            if (tok->tok_extra_tokens) {
+                if (tok->comment_newline) {
+                    tok->comment_newline = 0;
+                }
+                p_start = tok->start;
+                p_end = tok->cur;
+                return MAKE_TOKEN(NL);
+            }
+            goto nextline;
+        }
+        if (tok->comment_newline && tok->tok_extra_tokens) {
+            tok->comment_newline = 0;
+            p_start = tok->start;
+            p_end = tok->cur;
+            return MAKE_TOKEN(NL);
+        }
+        p_start = tok->start;
+        p_end = tok->cur - 1; /* Leave '\n' out of the string */
+        tok->cont_line = 0;
+        return MAKE_TOKEN(NEWLINE);
+    }
+
+    /* Period or number starting with period? */
+    if (c == '.') {
+        c = tok_nextc(tok);
+        if (Py_ISDIGIT(c)) {
+            goto fraction;
+        } else if (c == '.') {
+            c = tok_nextc(tok);
+            if (c == '.') {
+                p_start = tok->start;
+                p_end = tok->cur;
+                return MAKE_TOKEN(ELLIPSIS);
+            }
+            else {
+                tok_backup(tok, c);
+            }
+            tok_backup(tok, '.');
+        }
+        else {
+            tok_backup(tok, c);
+        }
+        p_start = tok->start;
+        p_end = tok->cur;
+        return MAKE_TOKEN(DOT);
+    }
+
+    /* Number */
+    if (Py_ISDIGIT(c)) {
+        if (c == '0') {
+            /* Hex, octal or binary -- maybe. */
+            c = tok_nextc(tok);
+            if (c == 'x' || c == 'X') {
+                /* Hex */
+                c = tok_nextc(tok);
+                do {
+                    if (c == '_') {
+                        c = tok_nextc(tok);
+                    }
+                    if (!Py_ISXDIGIT(c)) {
+                        tok_backup(tok, c);
+                        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid hexadecimal literal"));
+                    }
+                    do {
+                        c = tok_nextc(tok);
+                    } while (Py_ISXDIGIT(c));
+                } while (c == '_');
+                if (!verify_end_of_number(tok, c, "hexadecimal")) {
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+            else if (c == 'o' || c == 'O') {
+                /* Octal */
+                c = tok_nextc(tok);
+                do {
+                    if (c == '_') {
+                        c = tok_nextc(tok);
+                    }
+                    if (c < '0' || c >= '8') {
+                        if (Py_ISDIGIT(c)) {
+                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
+                                    "invalid digit '%c' in octal literal", c));
+                        }
+                        else {
+                            tok_backup(tok, c);
+                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid octal literal"));
+                        }
+                    }
+                    do {
+                        c = tok_nextc(tok);
+                    } while ('0' <= c && c < '8');
+                } while (c == '_');
+                if (Py_ISDIGIT(c)) {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
+                            "invalid digit '%c' in octal literal", c));
+                }
+                if (!verify_end_of_number(tok, c, "octal")) {
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+            else if (c == 'b' || c == 'B') {
+                /* Binary */
+                c = tok_nextc(tok);
+                do {
+                    if (c == '_') {
+                        c = tok_nextc(tok);
+                    }
+                    if (c != '0' && c != '1') {
+                        if (Py_ISDIGIT(c)) {
+                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid digit '%c' in binary literal", c));
+                        }
+                        else {
+                            tok_backup(tok, c);
+                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid binary literal"));
+                        }
+                    }
+                    do {
+                        c = tok_nextc(tok);
+                    } while (c == '0' || c == '1');
+                } while (c == '_');
+                if (Py_ISDIGIT(c)) {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid digit '%c' in binary literal", c));
+                }
+                if (!verify_end_of_number(tok, c, "binary")) {
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+            else {
+                int nonzero = 0;
+                /* maybe old-style octal; c is first char of it */
+                /* in any case, allow '0' as a literal */
+                while (1) {
+                    if (c == '_') {
+                        c = tok_nextc(tok);
+                        if (!Py_ISDIGIT(c)) {
+                            tok_backup(tok, c);
+                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid decimal literal"));
+                        }
+                    }
+                    if (c != '0') {
+                        break;
+                    }
+                    c = tok_nextc(tok);
+                }
+                char* zeros_end = tok->cur;
+                if (Py_ISDIGIT(c)) {
+                    nonzero = 1;
+                    c = tok_decimal_tail(tok);
+                    if (c == 0) {
+                        return MAKE_TOKEN(ERRORTOKEN);
+                    }
+                }
+                if (c == '.') {
+                    c = tok_nextc(tok);
+                    goto fraction;
+                }
+                else if (c == 'e' || c == 'E') {
+                    goto exponent;
+                }
+                else if (c == 'j' || c == 'J') {
+                    goto imaginary;
+                }
+                else if (nonzero && !tok->tok_extra_tokens) {
+                    /* Old-style octal: now disallowed. */
+                    tok_backup(tok, c);
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror_known_range(
+                            tok, (int)(tok->start + 1 - tok->line_start),
+                            (int)(zeros_end - tok->line_start),
+                            "leading zeros in decimal integer "
+                            "literals are not permitted; "
+                            "use an 0o prefix for octal integers"));
+                }
+                if (!verify_end_of_number(tok, c, "decimal")) {
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+        }
+        else {
+            /* Decimal */
+            c = tok_decimal_tail(tok);
+            if (c == 0) {
+                return MAKE_TOKEN(ERRORTOKEN);
+            }
+            {
+                /* Accept floating point numbers. */
+                if (c == '.') {
+                    c = tok_nextc(tok);
+        fraction:
+                    /* Fraction */
+                    if (Py_ISDIGIT(c)) {
+                        c = tok_decimal_tail(tok);
+                        if (c == 0) {
+                            return MAKE_TOKEN(ERRORTOKEN);
+                        }
+                    }
+                }
+                if (c == 'e' || c == 'E') {
+                    int e;
+                  exponent:
+                    e = c;
+                    /* Exponent part */
+                    c = tok_nextc(tok);
+                    if (c == '+' || c == '-') {
+                        c = tok_nextc(tok);
+                        if (!Py_ISDIGIT(c)) {
+                            tok_backup(tok, c);
+                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid decimal literal"));
+                        }
+                    } else if (!Py_ISDIGIT(c)) {
+                        tok_backup(tok, c);
+                        if (!verify_end_of_number(tok, e, "decimal")) {
+                            return MAKE_TOKEN(ERRORTOKEN);
+                        }
+                        tok_backup(tok, e);
+                        p_start = tok->start;
+                        p_end = tok->cur;
+                        return MAKE_TOKEN(NUMBER);
+                    }
+                    c = tok_decimal_tail(tok);
+                    if (c == 0) {
+                        return MAKE_TOKEN(ERRORTOKEN);
+                    }
+                }
+                if (c == 'j' || c == 'J') {
+                    /* Imaginary part */
+        imaginary:
+                    c = tok_nextc(tok);
+                    if (!verify_end_of_number(tok, c, "imaginary")) {
+                        return MAKE_TOKEN(ERRORTOKEN);
+                    }
+                }
+                else if (!verify_end_of_number(tok, c, "decimal")) {
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+        }
+        tok_backup(tok, c);
+        p_start = tok->start;
+        p_end = tok->cur;
+        return MAKE_TOKEN(NUMBER);
+    }
+
+  f_string_quote:
+    if (((Py_TOLOWER(*tok->start) == 'f' || Py_TOLOWER(*tok->start) == 'r') && (c == '\'' || c == '"'))) {
+        int quote = c;
+        int quote_size = 1;             /* 1 or 3 */
+
+        /* Nodes of type STRING, especially multi-line strings,
+           must be handled differently in order to get both
+           the starting line number and the column offset right.
+           (cf. issue 16806) */
+        tok->first_lineno = tok->lineno;
+        tok->multi_line_start = tok->line_start;
+
+        /* Find the quote size and start of string */
+        int after_quote = tok_nextc(tok);
+        if (after_quote == quote) {
+            int after_after_quote = tok_nextc(tok);
+            if (after_after_quote == quote) {
+                quote_size = 3;
+            }
+            else {
+                // TODO: Check this
+                tok_backup(tok, after_after_quote);
+                tok_backup(tok, after_quote);
+            }
+        }
+        if (after_quote != quote) {
+            tok_backup(tok, after_quote);
+        }
+
+
+        p_start = tok->start;
+        p_end = tok->cur;
+        if (tok->tok_mode_stack_index + 1 >= MAXFSTRINGLEVEL) {
+            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "too many nested f-strings"));
+        }
+        tokenizer_mode *the_current_tok = TOK_NEXT_MODE(tok);
+        the_current_tok->kind = TOK_FSTRING_MODE;
+        the_current_tok->f_string_quote = quote;
+        the_current_tok->f_string_quote_size = quote_size;
+        the_current_tok->f_string_start = tok->start;
+        the_current_tok->f_string_multi_line_start = tok->line_start;
+        the_current_tok->f_string_line_start = tok->lineno;
+        the_current_tok->f_string_start_offset = -1;
+        the_current_tok->f_string_multi_line_start_offset = -1;
+        the_current_tok->last_expr_buffer = NULL;
+        the_current_tok->last_expr_size = 0;
+        the_current_tok->last_expr_end = -1;
+        the_current_tok->f_string_debug = 0;
+
+        switch (*tok->start) {
+            case 'F':
+            case 'f':
+                the_current_tok->f_string_raw = Py_TOLOWER(*(tok->start + 1)) == 'r';
+                break;
+            case 'R':
+            case 'r':
+                the_current_tok->f_string_raw = 1;
+                break;
+            default:
+                Py_UNREACHABLE();
+        }
+
+        the_current_tok->curly_bracket_depth = 0;
+        the_current_tok->curly_bracket_expr_start_depth = -1;
+        return MAKE_TOKEN(FSTRING_START);
+    }
+
+  letter_quote:
+    /* String */
+    if (c == '\'' || c == '"') {
+        int quote = c;
+        int quote_size = 1;             /* 1 or 3 */
+        int end_quote_size = 0;
+
+        /* Nodes of type STRING, especially multi-line strings,
+           must be handled differently in order to get both
+           the starting line number and the column offset right.
+           (cf. issue 16806) */
+        tok->first_lineno = tok->lineno;
+        tok->multi_line_start = tok->line_start;
+
+        /* Find the quote size and start of string */
+        c = tok_nextc(tok);
+        if (c == quote) {
+            c = tok_nextc(tok);
+            if (c == quote) {
+                quote_size = 3;
+            }
+            else {
+                end_quote_size = 1;     /* empty string found */
+            }
+        }
+        if (c != quote) {
+            tok_backup(tok, c);
+        }
+
+        /* Get rest of string */
+        while (end_quote_size != quote_size) {
+            c = tok_nextc(tok);
+            if (tok->done == E_ERROR) {
+                return MAKE_TOKEN(ERRORTOKEN);
+            }
+            if (tok->done == E_DECODE) {
+                break;
+            }
+            if (c == EOF || (quote_size == 1 && c == '\n')) {
+                assert(tok->multi_line_start != NULL);
+                // shift the tok_state's location into
+                // the start of string, and report the error
+                // from the initial quote character
+                tok->cur = (char *)tok->start;
+                tok->cur++;
+                tok->line_start = tok->multi_line_start;
+                int start = tok->lineno;
+                tok->lineno = tok->first_lineno;
+
+                if (INSIDE_FSTRING(tok)) {
+                    /* When we are in an f-string, before raising the
+                     * unterminated string literal error, check whether
+                     * the initial quote matches the f-string's quotes;
+                     * if it does, this must be a missing '}' token,
+                     * so raise the proper error. */
+                    tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
+                    if (the_current_tok->f_string_quote == quote &&
+                        the_current_tok->f_string_quote_size == quote_size) {
+                        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "f-string: expecting '}'", start));
+                    }
+                }
+
+                if (quote_size == 3) {
+                    _PyTokenizer_syntaxerror(tok, "unterminated triple-quoted string literal"
+                                     " (detected at line %d)", start);
+                    if (c != '\n') {
+                        tok->done = E_EOFS;
+                    }
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+                else {
+                    _PyTokenizer_syntaxerror(tok, "unterminated string literal (detected at"
+                                     " line %d)", start);
+                    if (c != '\n') {
+                        tok->done = E_EOLS;
+                    }
+                    return MAKE_TOKEN(ERRORTOKEN);
+                }
+            }
+            if (c == quote) {
+                end_quote_size += 1;
+            }
+            else {
+                end_quote_size = 0;
+                if (c == '\\') {
+                    c = tok_nextc(tok);  /* skip escaped char */
+                    if (c == '\r') {
+                        c = tok_nextc(tok);
+                    }
+                }
+            }
+        }
+
+        p_start = tok->start;
+        p_end = tok->cur;
+        return MAKE_TOKEN(STRING);
+    }
+
+    /* Line continuation */
+    if (c == '\\') {
+        if ((c = tok_continuation_line(tok)) == -1) {
+            return MAKE_TOKEN(ERRORTOKEN);
+        }
+        tok->cont_line = 1;
+        goto again; /* Read next line */
+    }
+
+    /* Punctuation character */
+    int is_punctuation = (c == ':' || c == '}' || c == '!' || c == '{');
+    if (is_punctuation && INSIDE_FSTRING(tok) && INSIDE_FSTRING_EXPR(current_tok)) {
+        /* This code block gets executed before the curly_bracket_depth is incremented
+         * by the `{` case, so, to ensure that we are on the 0th level, we need
+         * to adjust it manually */
+        int cursor = current_tok->curly_bracket_depth - (c != '{');
+        if (cursor == 0 && !_PyLexer_update_fstring_expr(tok, c)) {
+            return MAKE_TOKEN(ENDMARKER);
+        }
+        if (cursor == 0 && c != '{' && set_fstring_expr(tok, token, c)) {
+            return MAKE_TOKEN(ERRORTOKEN);
+        }
+
+        if (c == ':' && cursor == current_tok->curly_bracket_expr_start_depth) {
+            current_tok->kind = TOK_FSTRING_MODE;
+            p_start = tok->start;
+            p_end = tok->cur;
+            return MAKE_TOKEN(_PyToken_OneChar(c));
+        }
+    }
+
+    /* Check for two-character token */
+    {
+        int c2 = tok_nextc(tok);
+        int current_token = _PyToken_TwoChars(c, c2);
+        if (current_token != OP) {
+            int c3 = tok_nextc(tok);
+            int current_token3 = _PyToken_ThreeChars(c, c2, c3);
+            if (current_token3 != OP) {
+                current_token = current_token3;
+            }
+            else {
+                tok_backup(tok, c3);
+            }
+            p_start = tok->start;
+            p_end = tok->cur;
+            return MAKE_TOKEN(current_token);
+        }
+        tok_backup(tok, c2);
+    }
+
+    /* Keep track of parentheses nesting level */
+    switch (c) {
+    case '(':
+    case '[':
+    case '{':
+        if (tok->level >= MAXLEVEL) {
+            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "too many nested parentheses"));
+        }
+        tok->parenstack[tok->level] = c;
+        tok->parenlinenostack[tok->level] = tok->lineno;
+        tok->parencolstack[tok->level] = (int)(tok->start - tok->line_start);
+        tok->level++;
+        if (INSIDE_FSTRING(tok)) {
+            current_tok->curly_bracket_depth++;
+        }
+        break;
+    case ')':
+    case ']':
+    case '}':
+        if (INSIDE_FSTRING(tok) && !current_tok->curly_bracket_depth && c == '}') {
+            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "f-string: single '}' is not allowed"));
+        }
+        if (!tok->tok_extra_tokens && !tok->level) {
+            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "unmatched '%c'", c));
+        }
+        if (tok->level > 0) {
+            tok->level--;
+            int opening = tok->parenstack[tok->level];
+            if (!tok->tok_extra_tokens && !((opening == '(' && c == ')') ||
+                                            (opening == '[' && c == ']') ||
+                                            (opening == '{' && c == '}'))) {
+                /* If the opening bracket belongs to an f-string's expression
+                part (e.g. f"{)}") and the closing bracket sits inside an
+                arbitrarily nested expression, then instead of matching it
+                with a different syntactical construct, we throw an
+                unmatched-parentheses error. */
+                if (INSIDE_FSTRING(tok) && opening == '{') {
+                    assert(current_tok->curly_bracket_depth >= 0);
+                    int previous_bracket = current_tok->curly_bracket_depth - 1;
+                    if (previous_bracket == current_tok->curly_bracket_expr_start_depth) {
+                        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "f-string: unmatched '%c'", c));
+                    }
+                }
+                if (tok->parenlinenostack[tok->level] != tok->lineno) {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
+                            "closing parenthesis '%c' does not match "
+                            "opening parenthesis '%c' on line %d",
+                            c, opening, tok->parenlinenostack[tok->level]));
+                }
+                else {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
+                            "closing parenthesis '%c' does not match "
+                            "opening parenthesis '%c'",
+                            c, opening));
+                }
+            }
+        }
+
+        if (INSIDE_FSTRING(tok)) {
+            current_tok->curly_bracket_depth--;
+            if (c == '}' && current_tok->curly_bracket_depth == current_tok->curly_bracket_expr_start_depth) {
+                current_tok->curly_bracket_expr_start_depth--;
+                current_tok->kind = TOK_FSTRING_MODE;
+                current_tok->f_string_debug = 0;
+            }
+        }
+        break;
+    default:
+        break;
+    }
+
+    if (!Py_UNICODE_ISPRINTABLE(c)) {
+        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid non-printable character U+%04X", c));
+    }
+
+    if (c == '=' && INSIDE_FSTRING_EXPR(current_tok)) {
+        current_tok->f_string_debug = 1;
+    }
+
+    /* Punctuation character */
+    p_start = tok->start;
+    p_end = tok->cur;
+    return MAKE_TOKEN(_PyToken_OneChar(c));
+}
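
tok_get_normal_mode above and tok_get_fstring_mode below are presumably
selected on the kind of the active mode at the top of tok_mode_stack; the
dispatcher itself is beyond this excerpt, but a plausible sketch of its
shape:

    /* Sketch (assumed shape, not shown in this excerpt). */
    static int
    tok_get(struct tok_state *tok, struct token *token)
    {
        tokenizer_mode *current_tok = TOK_GET_MODE(tok);
        if (current_tok->kind == TOK_REGULAR_MODE) {
            return tok_get_normal_mode(tok, current_tok, token);
        }
        return tok_get_fstring_mode(tok, current_tok, token);
    }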
+
+static int
+tok_get_fstring_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
+{
+    const char *p_start = NULL;
+    const char *p_end = NULL;
+    int end_quote_size = 0;
+    int unicode_escape = 0;
+
+    tok->start = tok->cur;
+    tok->first_lineno = tok->lineno;
+    tok->starting_col_offset = tok->col_offset;
+
+    // If we start with a bracket, we defer to the normal mode as there is nothing for us to tokenize
+    // before it.
+    int start_char = tok_nextc(tok);
+    if (start_char == '{') {
+        int peek1 = tok_nextc(tok);
+        tok_backup(tok, peek1);
+        tok_backup(tok, start_char);
+        if (peek1 != '{') {
+            current_tok->curly_bracket_expr_start_depth++;
+            if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
+                return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "f-string: expressions nested too deeply"));
+            }
+            TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
+            return tok_get_normal_mode(tok, current_tok, token);
+        }
+    }
+    else {
+        tok_backup(tok, start_char);
+    }
+
+    // Check if we are at the end of the string
+    for (int i = 0; i < current_tok->f_string_quote_size; i++) {
+        int quote = tok_nextc(tok);
+        if (quote != current_tok->f_string_quote) {
+            tok_backup(tok, quote);
+            goto f_string_middle;
+        }
+    }
+
+    if (current_tok->last_expr_buffer != NULL) {
+        PyMem_Free(current_tok->last_expr_buffer);
+        current_tok->last_expr_buffer = NULL;
+        current_tok->last_expr_size = 0;
+        current_tok->last_expr_end = -1;
+    }
+
+    p_start = tok->start;
+    p_end = tok->cur;
+    tok->tok_mode_stack_index--;
+    return MAKE_TOKEN(FSTRING_END);
+
+f_string_middle:
+
+    // TODO: This is a bit of a hack, but it works for now. We need to find a better way to handle
+    // this.
+    tok->multi_line_start = tok->line_start;
+    while (end_quote_size != current_tok->f_string_quote_size) {
+        int c = tok_nextc(tok);
+        if (tok->done == E_ERROR) {
+            return MAKE_TOKEN(ERRORTOKEN);
+        }
+        int in_format_spec = (
+                current_tok->last_expr_end != -1
+                &&
+                INSIDE_FSTRING_EXPR(current_tok)
+        );
+
+        if (c == EOF || (current_tok->f_string_quote_size == 1 && c == '\n')) {
+            if (tok->decoding_erred) {
+                return MAKE_TOKEN(ERRORTOKEN);
+            }
+
+            // If we are in a format spec and we found a newline,
+            // it means that the format spec ends here and we should
+            // return to the regular mode.
+            if (in_format_spec && c == '\n') {
+                tok_backup(tok, c);
+                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
+                p_start = tok->start;
+                p_end = tok->cur;
+                return MAKE_TOKEN(FSTRING_MIDDLE);
+            }
+
+            assert(tok->multi_line_start != NULL);
+            // shift the tok_state's location to the start of
+            // the string and report the error from the initial
+            // quote character
+            tok->cur = (char *)current_tok->f_string_start;
+            tok->cur++;
+            tok->line_start = current_tok->f_string_multi_line_start;
+            int start = tok->lineno;
+
+            tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
+            tok->lineno = the_current_tok->f_string_line_start;
+
+            if (current_tok->f_string_quote_size == 3) {
+                return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
+                                    "unterminated triple-quoted f-string literal"
+                                    " (detected at line %d)", start));
+            }
+            else {
+                return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
+                                    "unterminated f-string literal (detected at"
+                                    " line %d)", start));
+            }
+        }
+
+        if (c == current_tok->f_string_quote) {
+            end_quote_size += 1;
+            continue;
+        } else {
+            end_quote_size = 0;
+        }
+
+        if (c == '{') {
+            int peek = tok_nextc(tok);
+            if (peek != '{' || in_format_spec) {
+                tok_backup(tok, peek);
+                tok_backup(tok, c);
+                current_tok->curly_bracket_expr_start_depth++;
+                if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
+                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "f-string: expressions nested too deeply"));
+                }
+                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
+                p_start = tok->start;
+                p_end = tok->cur;
+            } else {
+                p_start = tok->start;
+                p_end = tok->cur - 1;
+            }
+            return MAKE_TOKEN(FSTRING_MIDDLE);
+        } else if (c == '}') {
+            if (unicode_escape) {
+                p_start = tok->start;
+                p_end = tok->cur;
+                return MAKE_TOKEN(FSTRING_MIDDLE);
+            }
+            int peek = tok_nextc(tok);
+
+            // The tokenizer can only be in the format spec if we have already completed the expression
+            // scanning (indicated by the end of the expression being set) and we are not at the top level
+            // of the bracket stack (-1 is the top level). Since format specifiers can't legally use double
+            // brackets, we can bypass that check here.
+            if (peek == '}' && !in_format_spec) {
+                p_start = tok->start;
+                p_end = tok->cur - 1;
+            } else {
+                tok_backup(tok, peek);
+                tok_backup(tok, c);
+                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
+                p_start = tok->start;
+                p_end = tok->cur;
+            }
+            return MAKE_TOKEN(FSTRING_MIDDLE);
+        } else if (c == '\\') {
+            int peek = tok_nextc(tok);
+            if (peek == '\r') {
+                peek = tok_nextc(tok);
+            }
+            // Special case when the backslash is right before a curly
+            // brace: back up the peeked character and return control
+            // to the loop for the next iteration.
+            if (peek == '{' || peek == '}') {
+                if (!current_tok->f_string_raw) {
+                    if (_PyTokenizer_warn_invalid_escape_sequence(tok, peek)) {
+                        return MAKE_TOKEN(ERRORTOKEN);
+                    }
+                }
+                tok_backup(tok, peek);
+                continue;
+            }
+
+            if (!current_tok->f_string_raw) {
+                if (peek == 'N') {
+                    /* Handle named unicode escapes (\N{BULLET}) */
+                    peek = tok_nextc(tok);
+                    if (peek == '{') {
+                        unicode_escape = 1;
+                    } else {
+                        tok_backup(tok, peek);
+                    }
+                }
+            } /* else {
+                skip the escaped character
+            }*/
+        }
+    }
+
+    // Back up the f-string quotes to emit a final FSTRING_MIDDLE and
+    // add the quotes to the FSTRING_END in the next tokenizer iteration.
+    for (int i = 0; i < current_tok->f_string_quote_size; i++) {
+        tok_backup(tok, current_tok->f_string_quote);
+    }
+    p_start = tok->start;
+    p_end = tok->cur;
+    return MAKE_TOKEN(FSTRING_MIDDLE);
+}
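
The loop above finds the end of an f-string by counting consecutive quote characters (end_quote_size) and resetting the count whenever the run is interrupted. A minimal standalone sketch of that technique, assuming the quote character and quote size (1 or 3) are already known; names are hypothetical:

    #include <stdio.h>

    /* Scan until quote_size consecutive quote characters are seen; any
     * other character resets the count. Returns the offset just past
     * the closing quotes, or -1 if the literal is unterminated. */
    static long find_string_end(const char *s, char quote, int quote_size) {
        int end_quote_size = 0;
        for (long i = 0; s[i] != '\0'; i++) {
            if (s[i] == quote) {
                end_quote_size += 1;
                if (end_quote_size == quote_size) {
                    return i + 1;   /* one past the final quote */
                }
            } else {
                end_quote_size = 0; /* run of quotes was interrupted */
            }
        }
        return -1;  /* unterminated */
    }

    int main(void) {
        /* body of f"""abc "quoted" def""" after the opening quotes */
        const char *body = "abc \"quoted\" def\"\"\" tail";
        long end = find_string_end(body, '"', 3);
        printf("closing quotes end at offset %ld\n", end);  /* 19 */
        return 0;
    }
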
+
+static int
+tok_get(struct tok_state *tok, struct token *token)
+{
+    tokenizer_mode *current_tok = TOK_GET_MODE(tok);
+    if (current_tok->kind == TOK_REGULAR_MODE) {
+        return tok_get_normal_mode(tok, current_tok, token);
+    } else {
+        return tok_get_fstring_mode(tok, current_tok, token);
+    }
+}
+
+int
+_PyTokenizer_Get(struct tok_state *tok, struct token *token)
+{
+    int result = tok_get(tok, token);
+    if (tok->decoding_erred) {
+        result = ERRORTOKEN;
+        tok->done = E_DECODE;
+    }
+    return result;
+}
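
tok_get() and _PyTokenizer_Get() close the file with a common shape: a dispatcher picks the scanner for the current mode, and a thin public wrapper overrides the result when the input layer reported a decoding error. A minimal standalone sketch of that shape; all names here are hypothetical, not the commit's API:

    #include <stdio.h>

    enum mode_kind { REGULAR_MODE, FSTRING_MODE };
    enum { TOKEN_OK, TOKEN_ERROR };

    struct scanner {
        enum mode_kind kind;
        int decoding_erred;  /* set asynchronously by the input layer */
    };

    static int get_regular(struct scanner *s) { (void)s; return TOKEN_OK; }
    static int get_fstring(struct scanner *s) { (void)s; return TOKEN_OK; }

    /* Dispatch on the current mode, mirroring tok_get(). */
    static int get_token(struct scanner *s) {
        return (s->kind == REGULAR_MODE) ? get_regular(s) : get_fstring(s);
    }

    /* Public wrapper: even if the scanner produced a token, a decoding
     * error discovered while refilling the buffer overrides the result. */
    static int scanner_get(struct scanner *s) {
        int result = get_token(s);
        if (s->decoding_erred) {
            result = TOKEN_ERROR;
        }
        return result;
    }

    int main(void) {
        struct scanner s = { REGULAR_MODE, 0 };
        printf("first: %d\n", scanner_get(&s));              /* 0 == TOKEN_OK */
        s.decoding_erred = 1;
        printf("after decode error: %d\n", scanner_get(&s)); /* 1 == TOKEN_ERROR */
        return 0;
    }
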
diff --git a/Parser/lexer/lexer.h b/Parser/lexer/lexer.h
new file mode 100644 (file)
index 0000000..7f21bf5
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _PY_LEXER_LEXER_H_
+#define _PY_LEXER_LEXER_H_
+
+#include "state.h"
+
+int _PyLexer_update_fstring_expr(struct tok_state *tok, char cur);
+
+int _PyTokenizer_Get(struct tok_state *, struct token *);
+
+#endif
diff --git a/Parser/lexer/state.c b/Parser/lexer/state.c
new file mode 100644 (file)
index 0000000..653ddaf
--- /dev/null
@@ -0,0 +1,149 @@
+#include "Python.h"
+#include "pycore_pystate.h"
+#include "pycore_token.h"
+#include "errcode.h"
+
+#include "state.h"
+
+/* Never change this */
+#define TABSIZE 8
+
+/* Create and initialize a new tok_state structure */
+struct tok_state *
+_PyTokenizer_tok_new(void)
+{
+    struct tok_state *tok = (struct tok_state *)PyMem_Malloc(
+                                            sizeof(struct tok_state));
+    if (tok == NULL)
+        return NULL;
+    tok->buf = tok->cur = tok->inp = NULL;
+    tok->fp_interactive = 0;
+    tok->interactive_src_start = NULL;
+    tok->interactive_src_end = NULL;
+    tok->start = NULL;
+    tok->end = NULL;
+    tok->done = E_OK;
+    tok->fp = NULL;
+    tok->input = NULL;
+    tok->tabsize = TABSIZE;
+    tok->indent = 0;
+    tok->indstack[0] = 0;
+    tok->atbol = 1;
+    tok->pendin = 0;
+    tok->prompt = tok->nextprompt = NULL;
+    tok->lineno = 0;
+    tok->starting_col_offset = -1;
+    tok->col_offset = -1;
+    tok->level = 0;
+    tok->altindstack[0] = 0;
+    tok->decoding_state = STATE_INIT;
+    tok->decoding_erred = 0;
+    tok->enc = NULL;
+    tok->encoding = NULL;
+    tok->cont_line = 0;
+    tok->filename = NULL;
+    tok->decoding_readline = NULL;
+    tok->decoding_buffer = NULL;
+    tok->readline = NULL;
+    tok->type_comments = 0;
+    tok->interactive_underflow = IUNDERFLOW_NORMAL;
+    tok->underflow = NULL;
+    tok->str = NULL;
+    tok->report_warnings = 1;
+    tok->tok_extra_tokens = 0;
+    tok->comment_newline = 0;
+    tok->implicit_newline = 0;
+    tok->tok_mode_stack[0] = (tokenizer_mode){.kind = TOK_REGULAR_MODE, .f_string_quote = '\0', .f_string_quote_size = 0, .f_string_debug = 0};
+    tok->tok_mode_stack_index = 0;
+#ifdef Py_DEBUG
+    tok->debug = _Py_GetConfig()->parser_debug;
+#endif
+    return tok;
+}
+
+static void
+free_fstring_expressions(struct tok_state *tok)
+{
+    int index;
+    tokenizer_mode *mode;
+
+    for (index = tok->tok_mode_stack_index; index >= 0; --index) {
+        mode = &(tok->tok_mode_stack[index]);
+        if (mode->last_expr_buffer != NULL) {
+            PyMem_Free(mode->last_expr_buffer);
+            mode->last_expr_buffer = NULL;
+            mode->last_expr_size = 0;
+            mode->last_expr_end = -1;
+        }
+    }
+}
+
+/* Free a tok_state structure */
+void
+_PyTokenizer_Free(struct tok_state *tok)
+{
+    if (tok->encoding != NULL) {
+        PyMem_Free(tok->encoding);
+    }
+    Py_XDECREF(tok->decoding_readline);
+    Py_XDECREF(tok->decoding_buffer);
+    Py_XDECREF(tok->readline);
+    Py_XDECREF(tok->filename);
+    if ((tok->readline != NULL || tok->fp != NULL ) && tok->buf != NULL) {
+        PyMem_Free(tok->buf);
+    }
+    if (tok->input) {
+        PyMem_Free(tok->input);
+    }
+    if (tok->interactive_src_start != NULL) {
+        PyMem_Free(tok->interactive_src_start);
+    }
+    free_fstring_expressions(tok);
+    PyMem_Free(tok);
+}
+
+void
+_PyToken_Free(struct token *token) {
+    Py_XDECREF(token->metadata);
+}
+
+void
+_PyToken_Init(struct token *token) {
+    token->metadata = NULL;
+}
+
+int
+_PyLexer_type_comment_token_setup(struct tok_state *tok, struct token *token, int type, int col_offset,
+                         int end_col_offset, const char *start, const char *end)
+{
+    token->level = tok->level;
+    token->lineno = token->end_lineno = tok->lineno;
+    token->col_offset = col_offset;
+    token->end_col_offset = end_col_offset;
+    token->start = start;
+    token->end = end;
+    return type;
+}
+
+int
+_PyLexer_token_setup(struct tok_state *tok, struct token *token, int type, const char *start, const char *end)
+{
+    assert((start == NULL && end == NULL) || (start != NULL && end != NULL));
+    token->level = tok->level;
+    if (ISSTRINGLIT(type)) {
+        token->lineno = tok->first_lineno;
+    }
+    else {
+        token->lineno = tok->lineno;
+    }
+    token->end_lineno = tok->lineno;
+    token->col_offset = token->end_col_offset = -1;
+    token->start = start;
+    token->end = end;
+
+    if (start != NULL && end != NULL) {
+        token->col_offset = tok->starting_col_offset;
+        token->end_col_offset = tok->col_offset;
+    }
+    return type;
+}
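
_PyLexer_token_setup() records a token's source span: its nesting level, its start line (first_lineno for string literals, which may span lines), and column offsets only when start/end pointers exist. A simplified standalone sketch of that bookkeeping, using hypothetical reduced types:

    #include <stdio.h>

    struct tok { int level, lineno, first_lineno, col, starting_col; };
    struct token { int level, lineno, end_lineno, col, end_col; };

    static void setup(struct tok *t, struct token *tk, int is_string) {
        tk->level = t->level;
        /* multi-line string literals keep the line where they began */
        tk->lineno = is_string ? t->first_lineno : t->lineno;
        tk->end_lineno = t->lineno;
        tk->col = t->starting_col;
        tk->end_col = t->col;
    }

    int main(void) {
        /* a triple-quoted string that began on line 3 and ends on line 5 */
        struct tok t = { .level = 1, .lineno = 5, .first_lineno = 3,
                         .col = 4, .starting_col = 0 };
        struct token tk;
        setup(&t, &tk, 1);
        printf("lines %d-%d, cols %d-%d\n",
               tk.lineno, tk.end_lineno, tk.col, tk.end_col);  /* 3-5, 0-4 */
        return 0;
    }
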
similarity index 85%
rename from Parser/tokenizer.h
rename to Parser/lexer/state.h
index 11d69fc5b2e15cc82b570dfb2cfcc82882df513b..61d090d6d2fe213a9f649a9e04eb3042aafcf0aa 100644 (file)
@@ -1,19 +1,15 @@
-#ifndef Py_TOKENIZER_H
-#define Py_TOKENIZER_H
-#ifdef __cplusplus
-extern "C" {
-#endif
+#ifndef _PY_LEXER_H_
+#define _PY_LEXER_H_
 
 #include "object.h"
 
-/* Tokenizer interface */
-
-#include "pycore_token.h" /* For token types */
-
 #define MAXINDENT 100       /* Max indentation level */
 #define MAXLEVEL 200        /* Max parentheses level */
 #define MAXFSTRINGLEVEL 150 /* Max f-string nesting level */
 
+#define INSIDE_FSTRING(tok) (tok->tok_mode_stack_index > 0)
+#define INSIDE_FSTRING_EXPR(tok) (tok->curly_bracket_expr_start_depth >= 0)
+
 enum decoding_state {
     STATE_INIT,
     STATE_SEEK_CODING,
@@ -118,6 +114,8 @@ struct tok_state {
 
     /* How to proceed when asked for a new token in interactive mode */
     enum interactive_underflow_t interactive_underflow;
+    int (*underflow)(struct tok_state *); /* Function to call when the buffer is empty and we need to refill it */
+
     int report_warnings;
     // TODO: Factor this into its own thing
     tokenizer_mode tok_mode_stack[MAXFSTRINGLEVEL];
@@ -130,19 +128,14 @@ struct tok_state {
 #endif
 };
 
-extern struct tok_state *_PyTokenizer_FromString(const char *, int, int);
-extern struct tok_state *_PyTokenizer_FromUTF8(const char *, int, int);
-extern struct tok_state *_PyTokenizer_FromReadline(PyObject*, const char*, int, int);
-extern struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
-                                              const char *, const char *);
-extern void _PyTokenizer_Free(struct tok_state *);
-extern void _PyToken_Free(struct token *);
-extern void _PyToken_Init(struct token *);
-extern int _PyTokenizer_Get(struct tok_state *, struct token *);
+int _PyLexer_type_comment_token_setup(struct tok_state *tok, struct token *token, int type, int col_offset,
+                         int end_col_offset, const char *start, const char *end);
+int _PyLexer_token_setup(struct tok_state *tok, struct token *token, int type, const char *start, const char *end);
+
+struct tok_state *_PyTokenizer_tok_new(void);
+void _PyTokenizer_Free(struct tok_state *);
+void _PyToken_Free(struct token *);
+void _PyToken_Init(struct token *);
 
-#define tok_dump _Py_tok_dump
 
-#ifdef __cplusplus
-}
 #endif
-#endif /* !Py_TOKENIZER_H */
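
The two macros moved into this header encode the lexer's mode discipline: INSIDE_FSTRING() is true whenever the mode-stack index is positive, and INSIDE_FSTRING_EXPR() becomes true once an opening '{' has recorded a non-negative expression start depth. A standalone sketch of how the two flags evolve, with hypothetical reduced types:

    #include <stdio.h>

    struct mode { int curly_bracket_expr_start_depth; };
    struct state { int mode_stack_index; struct mode modes[8]; };

    #define INSIDE_FSTRING(s)      ((s)->mode_stack_index > 0)
    #define INSIDE_FSTRING_EXPR(m) ((m)->curly_bracket_expr_start_depth >= 0)

    int main(void) {
        struct state s = { 0, {{ -1 }} };
        printf("%d\n", INSIDE_FSTRING(&s));                /* 0: top level */
        s.mode_stack_index = 1;                            /* entered f"..." */
        s.modes[1].curly_bracket_expr_start_depth = -1;
        printf("%d\n", INSIDE_FSTRING(&s));                /* 1 */
        printf("%d\n", INSIDE_FSTRING_EXPR(&s.modes[1]));  /* 0: literal part */
        s.modes[1].curly_bracket_expr_start_depth = 0;     /* saw '{' */
        printf("%d\n", INSIDE_FSTRING_EXPR(&s.modes[1]));  /* 1 */
        return 0;
    }
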
index 1487ac4ff856c2931ab0053f821f6e8c79d83480..447f56651af4249b8040b339b4313169c2200fc8 100644 (file)
@@ -1,6 +1,5 @@
 #include "Python.h"
 
-#include "tokenizer.h"
 #include "pegen.h"
 
 mod_ty
index bfade3446a57f7b9da55cd551b9b29f100b2ce88..2e0b569c20e827e0fc7279d84a7f07c585d2e3b8 100644 (file)
@@ -3,7 +3,8 @@
 #include "pycore_pystate.h"       // _PyThreadState_GET()
 #include <errcode.h>
 
-#include "tokenizer.h"
+#include "lexer/lexer.h"
+#include "tokenizer/tokenizer.h"
 #include "pegen.h"
 
 // Internal parser functions
index b11b0442fe96fffa016a1c20a79696a9de635edb..2baae5289b5667ce6ec381a7b3fdf442997f8e6e 100644 (file)
@@ -2,7 +2,8 @@
 #include <errcode.h>
 
 #include "pycore_pyerrors.h"      // _PyErr_ProgramDecodedTextObject()
-#include "tokenizer.h"
+#include "lexer/state.h"
+#include "lexer/lexer.h"
 #include "pegen.h"
 
 // TOKENIZER ERRORS
index 72898c38b79bdef77f0c5e16a31a46ab193c66f1..c5f421844e9c52981dd4f21828e89e24ba481944 100644 (file)
@@ -4,7 +4,7 @@
 #include "pycore_bytesobject.h"   // _PyBytes_DecodeEscape()
 #include "pycore_unicodeobject.h" // _PyUnicode_DecodeUnicodeEscapeInternal()
 
-#include "tokenizer.h"
+#include "lexer/state.h"
 #include "pegen.h"
 #include "string_parser.h"
 
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
deleted file mode 100644 (file)
index 5e3816f..0000000
+++ /dev/null
@@ -1,2951 +0,0 @@
-
-/* Tokenizer implementation */
-
-#include "Python.h"
-#include "pycore_call.h"          // _PyObject_CallNoArgs()
-
-#include "tokenizer.h"            // struct tok_state
-#include "errcode.h"              // E_OK
-
-#ifdef HAVE_UNISTD_H
-#  include <unistd.h>             // read()
-#endif
-
-/* Alternate tab spacing */
-#define ALTTABSIZE 1
-
-#define is_potential_identifier_start(c) (\
-              (c >= 'a' && c <= 'z')\
-               || (c >= 'A' && c <= 'Z')\
-               || c == '_'\
-               || (c >= 128))
-
-#define is_potential_identifier_char(c) (\
-              (c >= 'a' && c <= 'z')\
-               || (c >= 'A' && c <= 'Z')\
-               || (c >= '0' && c <= '9')\
-               || c == '_'\
-               || (c >= 128))
-
-
-/* Don't ever change this -- it would break the portability of Python code */
-#define TABSIZE 8
-
-#define MAKE_TOKEN(token_type) token_setup(tok, token, token_type, p_start, p_end)
-#define MAKE_TYPE_COMMENT_TOKEN(token_type, col_offset, end_col_offset) (\
-                type_comment_token_setup(tok, token, token_type, col_offset, end_col_offset, p_start, p_end))
-#define ADVANCE_LINENO() \
-            tok->lineno++; \
-            tok->col_offset = 0;
-
-#define INSIDE_FSTRING(tok) (tok->tok_mode_stack_index > 0)
-#define INSIDE_FSTRING_EXPR(tok) (tok->curly_bracket_expr_start_depth >= 0)
-#ifdef Py_DEBUG
-static inline tokenizer_mode* TOK_GET_MODE(struct tok_state* tok) {
-    assert(tok->tok_mode_stack_index >= 0);
-    assert(tok->tok_mode_stack_index < MAXFSTRINGLEVEL);
-    return &(tok->tok_mode_stack[tok->tok_mode_stack_index]);
-}
-static inline tokenizer_mode* TOK_NEXT_MODE(struct tok_state* tok) {
-    assert(tok->tok_mode_stack_index >= 0);
-    assert(tok->tok_mode_stack_index + 1 < MAXFSTRINGLEVEL);
-    return &(tok->tok_mode_stack[++tok->tok_mode_stack_index]);
-}
-#else
-#define TOK_GET_MODE(tok) (&(tok->tok_mode_stack[tok->tok_mode_stack_index]))
-#define TOK_NEXT_MODE(tok) (&(tok->tok_mode_stack[++tok->tok_mode_stack_index]))
-#endif
-
-/* Forward */
-static struct tok_state *tok_new(void);
-static int tok_nextc(struct tok_state *tok);
-static void tok_backup(struct tok_state *tok, int c);
-static int syntaxerror(struct tok_state *tok, const char *format, ...);
-
-/* Spaces in this constant are treated as "zero or more spaces or tabs" when
-   tokenizing. */
-static const char* type_comment_prefix = "# type: ";
-
-/* Create and initialize a new tok_state structure */
-
-static struct tok_state *
-tok_new(void)
-{
-    struct tok_state *tok = (struct tok_state *)PyMem_Malloc(
-                                            sizeof(struct tok_state));
-    if (tok == NULL)
-        return NULL;
-    tok->buf = tok->cur = tok->inp = NULL;
-    tok->fp_interactive = 0;
-    tok->interactive_src_start = NULL;
-    tok->interactive_src_end = NULL;
-    tok->start = NULL;
-    tok->end = NULL;
-    tok->done = E_OK;
-    tok->fp = NULL;
-    tok->input = NULL;
-    tok->tabsize = TABSIZE;
-    tok->indent = 0;
-    tok->indstack[0] = 0;
-    tok->atbol = 1;
-    tok->pendin = 0;
-    tok->prompt = tok->nextprompt = NULL;
-    tok->lineno = 0;
-    tok->starting_col_offset = -1;
-    tok->col_offset = -1;
-    tok->level = 0;
-    tok->altindstack[0] = 0;
-    tok->decoding_state = STATE_INIT;
-    tok->decoding_erred = 0;
-    tok->enc = NULL;
-    tok->encoding = NULL;
-    tok->cont_line = 0;
-    tok->filename = NULL;
-    tok->decoding_readline = NULL;
-    tok->decoding_buffer = NULL;
-    tok->readline = NULL;
-    tok->type_comments = 0;
-    tok->interactive_underflow = IUNDERFLOW_NORMAL;
-    tok->str = NULL;
-    tok->report_warnings = 1;
-    tok->tok_extra_tokens = 0;
-    tok->comment_newline = 0;
-    tok->implicit_newline = 0;
-    tok->tok_mode_stack[0] = (tokenizer_mode){.kind = TOK_REGULAR_MODE, .f_string_quote = '\0', .f_string_quote_size = 0, .f_string_debug = 0};
-    tok->tok_mode_stack_index = 0;
-#ifdef Py_DEBUG
-    tok->debug = _Py_GetConfig()->parser_debug;
-#endif
-    return tok;
-}
-
-static char *
-new_string(const char *s, Py_ssize_t len, struct tok_state *tok)
-{
-    char* result = (char *)PyMem_Malloc(len + 1);
-    if (!result) {
-        tok->done = E_NOMEM;
-        return NULL;
-    }
-    memcpy(result, s, len);
-    result[len] = '\0';
-    return result;
-}
-
-static char *
-error_ret(struct tok_state *tok) /* XXX */
-{
-    tok->decoding_erred = 1;
-    if ((tok->fp != NULL || tok->readline != NULL) && tok->buf != NULL) {/* see _PyTokenizer_Free */
-        PyMem_Free(tok->buf);
-    }
-    tok->buf = tok->cur = tok->inp = NULL;
-    tok->start = NULL;
-    tok->end = NULL;
-    tok->done = E_DECODE;
-    return NULL;                /* as if it were EOF */
-}
-
-
-static const char *
-get_normal_name(const char *s)  /* for utf-8 and latin-1 */
-{
-    char buf[13];
-    int i;
-    for (i = 0; i < 12; i++) {
-        int c = s[i];
-        if (c == '\0')
-            break;
-        else if (c == '_')
-            buf[i] = '-';
-        else
-            buf[i] = Py_TOLOWER(c);
-    }
-    buf[i] = '\0';
-    if (strcmp(buf, "utf-8") == 0 ||
-        strncmp(buf, "utf-8-", 6) == 0)
-        return "utf-8";
-    else if (strcmp(buf, "latin-1") == 0 ||
-             strcmp(buf, "iso-8859-1") == 0 ||
-             strcmp(buf, "iso-latin-1") == 0 ||
-             strncmp(buf, "latin-1-", 8) == 0 ||
-             strncmp(buf, "iso-8859-1-", 11) == 0 ||
-             strncmp(buf, "iso-latin-1-", 12) == 0)
-        return "iso-8859-1";
-    else
-        return s;
-}
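
get_normal_name() canonicalizes encoding aliases before comparison: the first twelve characters are lowercased with '_' mapped to '-', and the common UTF-8 and Latin-1 spellings collapse onto one canonical name. A standalone sketch of the same idea; the alias list here is abbreviated:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static const char *normal_name(const char *s) {
        static char buf[13];
        int i;
        for (i = 0; i < 12 && s[i] != '\0'; i++) {
            buf[i] = (s[i] == '_') ? '-' : (char)tolower((unsigned char)s[i]);
        }
        buf[i] = '\0';
        if (strcmp(buf, "utf-8") == 0 || strncmp(buf, "utf-8-", 6) == 0)
            return "utf-8";
        if (strcmp(buf, "latin-1") == 0 || strcmp(buf, "iso-8859-1") == 0 ||
            strcmp(buf, "iso-latin-1") == 0)
            return "iso-8859-1";
        return s;   /* unknown names pass through unchanged */
    }

    int main(void) {
        printf("%s\n", normal_name("UTF_8"));       /* utf-8 */
        printf("%s\n", normal_name("ISO-8859-1"));  /* iso-8859-1 */
        printf("%s\n", normal_name("cp1252"));      /* cp1252 (unchanged) */
        return 0;
    }
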
-
-/* Return the coding spec in S, or NULL if none is found.  */
-
-static int
-get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *tok)
-{
-    Py_ssize_t i;
-    *spec = NULL;
-    /* Coding spec must be in a comment, and that comment must be
-     * the only statement on the source code line. */
-    for (i = 0; i < size - 6; i++) {
-        if (s[i] == '#')
-            break;
-        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
-            return 1;
-    }
-    for (; i < size - 6; i++) { /* XXX inefficient search */
-        const char* t = s + i;
-        if (memcmp(t, "coding", 6) == 0) {
-            const char* begin = NULL;
-            t += 6;
-            if (t[0] != ':' && t[0] != '=')
-                continue;
-            do {
-                t++;
-            } while (t[0] == ' ' || t[0] == '\t');
-
-            begin = t;
-            while (Py_ISALNUM(t[0]) ||
-                   t[0] == '-' || t[0] == '_' || t[0] == '.')
-                t++;
-
-            if (begin < t) {
-                char* r = new_string(begin, t - begin, tok);
-                const char* q;
-                if (!r)
-                    return 0;
-                q = get_normal_name(r);
-                if (r != q) {
-                    PyMem_Free(r);
-                    r = new_string(q, strlen(q), tok);
-                    if (!r)
-                        return 0;
-                }
-                *spec = r;
-                break;
-            }
-        }
-    }
-    return 1;
-}
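
get_coding_spec() implements the PEP 263 scan: the declaration must live in a comment and is introduced by "coding:" or "coding=", followed by an encoding name made of alphanumerics, '-', '_' and '.'. A simplified standalone sketch that extracts the name; it omits the original's check that the comment is the only content on the line:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int coding_spec(const char *line, char *out, size_t outlen) {
        const char *t = strstr(line, "coding");
        if (t == NULL) return 0;
        t += 6;
        if (*t != ':' && *t != '=') return 0;
        do { t++; } while (*t == ' ' || *t == '\t');
        size_t n = 0;
        while ((isalnum((unsigned char)t[n]) || t[n] == '-' ||
                t[n] == '_' || t[n] == '.') && n + 1 < outlen) {
            out[n] = t[n];
            n++;
        }
        out[n] = '\0';
        return n > 0;
    }

    int main(void) {
        char spec[64];
        if (coding_spec("# -*- coding: latin-1 -*-\n", spec, sizeof(spec))) {
            printf("declared encoding: %s\n", spec);  /* latin-1 */
        }
        return 0;
    }
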
-
-/* Check whether the line contains a coding spec. If it does,
-   invoke the set_readline function for the new encoding.
-   This function receives the tok_state and the new encoding.
-   Return 1 on success, 0 on failure.  */
-
-static int
-check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
-                  int set_readline(struct tok_state *, const char *))
-{
-    char *cs;
-    if (tok->cont_line) {
-        /* It's a continuation line, so it can't be a coding spec. */
-        tok->decoding_state = STATE_NORMAL;
-        return 1;
-    }
-    if (!get_coding_spec(line, &cs, size, tok)) {
-        return 0;
-    }
-    if (!cs) {
-        Py_ssize_t i;
-        for (i = 0; i < size; i++) {
-            if (line[i] == '#' || line[i] == '\n' || line[i] == '\r')
-                break;
-            if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') {
-                /* Stop checking coding spec after a line containing
-                 * anything except a comment. */
-                tok->decoding_state = STATE_NORMAL;
-                break;
-            }
-        }
-        return 1;
-    }
-    tok->decoding_state = STATE_NORMAL;
-    if (tok->encoding == NULL) {
-        assert(tok->decoding_readline == NULL);
-        if (strcmp(cs, "utf-8") != 0 && !set_readline(tok, cs)) {
-            error_ret(tok);
-            PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
-            PyMem_Free(cs);
-            return 0;
-        }
-        tok->encoding = cs;
-    } else {                /* then, compare cs with BOM */
-        if (strcmp(tok->encoding, cs) != 0) {
-            error_ret(tok);
-            PyErr_Format(PyExc_SyntaxError,
-                         "encoding problem: %s with BOM", cs);
-            PyMem_Free(cs);
-            return 0;
-        }
-        PyMem_Free(cs);
-    }
-    return 1;
-}
-
-/* See whether the file starts with a BOM. If it does,
-   invoke the set_readline function with the new encoding.
-   Return 1 on success, 0 on failure.  */
-
-static int
-check_bom(int get_char(struct tok_state *),
-          void unget_char(int, struct tok_state *),
-          int set_readline(struct tok_state *, const char *),
-          struct tok_state *tok)
-{
-    int ch1, ch2, ch3;
-    ch1 = get_char(tok);
-    tok->decoding_state = STATE_SEEK_CODING;
-    if (ch1 == EOF) {
-        return 1;
-    } else if (ch1 == 0xEF) {
-        ch2 = get_char(tok);
-        if (ch2 != 0xBB) {
-            unget_char(ch2, tok);
-            unget_char(ch1, tok);
-            return 1;
-        }
-        ch3 = get_char(tok);
-        if (ch3 != 0xBF) {
-            unget_char(ch3, tok);
-            unget_char(ch2, tok);
-            unget_char(ch1, tok);
-            return 1;
-        }
-    } else {
-        unget_char(ch1, tok);
-        return 1;
-    }
-    if (tok->encoding != NULL)
-        PyMem_Free(tok->encoding);
-    tok->encoding = new_string("utf-8", 5, tok);
-    if (!tok->encoding)
-        return 0;
-    /* No need to set_readline: input is already utf-8 */
-    return 1;
-}
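
check_bom() consumes a UTF-8 byte order mark (EF BB BF) if present and pushes the bytes back otherwise, so tokenization starts at the real first byte. A standalone sketch where a buffer offset stands in for the get/unget callbacks:

    #include <stdio.h>

    /* Return the offset where the source text really starts:
     * 3 if a UTF-8 BOM was consumed, 0 if nothing was consumed. */
    static int skip_utf8_bom(const unsigned char *buf, size_t len) {
        if (len >= 3 && buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF) {
            return 3;
        }
        return 0;
    }

    int main(void) {
        const unsigned char with_bom[] = { 0xEF, 0xBB, 0xBF, 'x', '=', '1' };
        const unsigned char without[]  = { 'x', '=', '1' };
        printf("%d\n", skip_utf8_bom(with_bom, sizeof with_bom));  /* 3 */
        printf("%d\n", skip_utf8_bom(without, sizeof without));    /* 0 */
        return 0;
    }
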
-
-static int
-tok_concatenate_interactive_new_line(struct tok_state *tok, const char *line) {
-    assert(tok->fp_interactive);
-
-    if (!line) {
-        return 0;
-    }
-
-    Py_ssize_t current_size = tok->interactive_src_end - tok->interactive_src_start;
-    Py_ssize_t line_size = strlen(line);
-    char last_char = line[line_size > 0 ? line_size - 1 : line_size];
-    if (last_char != '\n') {
-        line_size += 1;
-    }
-    char* new_str = tok->interactive_src_start;
-
-    new_str = PyMem_Realloc(new_str, current_size + line_size + 1);
-    if (!new_str) {
-        if (tok->interactive_src_start) {
-            PyMem_Free(tok->interactive_src_start);
-        }
-        tok->interactive_src_start = NULL;
-        tok->interactive_src_end = NULL;
-        tok->done = E_NOMEM;
-        return -1;
-    }
-    strcpy(new_str + current_size, line);
-    tok->implicit_newline = 0;
-    if (last_char != '\n') {
-        /* Last line does not end in \n, fake one */
-        new_str[current_size + line_size - 1] = '\n';
-        new_str[current_size + line_size] = '\0';
-        tok->implicit_newline = 1;
-    }
-    tok->interactive_src_start = new_str;
-    tok->interactive_src_end = new_str + current_size + line_size;
-    return 0;
-}
-
-/* Traverse and remember all f-string buffers, in order to be able to restore
-   them after reallocating tok->buf */
-static void
-remember_fstring_buffers(struct tok_state *tok)
-{
-    int index;
-    tokenizer_mode *mode;
-
-    for (index = tok->tok_mode_stack_index; index >= 0; --index) {
-        mode = &(tok->tok_mode_stack[index]);
-        mode->f_string_start_offset = mode->f_string_start - tok->buf;
-        mode->f_string_multi_line_start_offset = mode->f_string_multi_line_start - tok->buf;
-    }
-}
-
-/* Traverse and restore all f-string buffers after reallocating tok->buf */
-static void
-restore_fstring_buffers(struct tok_state *tok)
-{
-    int index;
-    tokenizer_mode *mode;
-
-    for (index = tok->tok_mode_stack_index; index >= 0; --index) {
-        mode = &(tok->tok_mode_stack[index]);
-        mode->f_string_start = tok->buf + mode->f_string_start_offset;
-        mode->f_string_multi_line_start = tok->buf + mode->f_string_multi_line_start_offset;
-    }
-}
-
-static int
-set_fstring_expr(struct tok_state* tok, struct token *token, char c) {
-    assert(token != NULL);
-    assert(c == '}' || c == ':' || c == '!');
-    tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
-
-    if (!tok_mode->f_string_debug || token->metadata) {
-        return 0;
-    }
-
-    PyObject *res = PyUnicode_DecodeUTF8(
-        tok_mode->last_expr_buffer,
-        tok_mode->last_expr_size - tok_mode->last_expr_end,
-        NULL
-    );
-    if (!res) {
-        return -1;
-    }
-    token->metadata = res;
-    return 0;
-}
-
-static int
-update_fstring_expr(struct tok_state *tok, char cur)
-{
-    assert(tok->cur != NULL);
-
-    Py_ssize_t size = strlen(tok->cur);
-    tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
-
-    switch (cur) {
-        case 0:
-            if (!tok_mode->last_expr_buffer || tok_mode->last_expr_end >= 0) {
-                return 1;
-            }
-            char *new_buffer = PyMem_Realloc(
-                tok_mode->last_expr_buffer,
-                tok_mode->last_expr_size + size
-            );
-            if (new_buffer == NULL) {
-                PyMem_Free(tok_mode->last_expr_buffer);
-                goto error;
-            }
-            tok_mode->last_expr_buffer = new_buffer;
-            strncpy(tok_mode->last_expr_buffer + tok_mode->last_expr_size, tok->cur, size);
-            tok_mode->last_expr_size += size;
-            break;
-        case '{':
-            if (tok_mode->last_expr_buffer != NULL) {
-                PyMem_Free(tok_mode->last_expr_buffer);
-            }
-            tok_mode->last_expr_buffer = PyMem_Malloc(size);
-            if (tok_mode->last_expr_buffer == NULL) {
-                goto error;
-            }
-            tok_mode->last_expr_size = size;
-            tok_mode->last_expr_end = -1;
-            strncpy(tok_mode->last_expr_buffer, tok->cur, size);
-            break;
-        case '}':
-        case '!':
-        case ':':
-            if (tok_mode->last_expr_end == -1) {
-                tok_mode->last_expr_end = strlen(tok->start);
-            }
-            break;
-        default:
-            Py_UNREACHABLE();
-    }
-    return 1;
-error:
-    tok->done = E_NOMEM;
-    return 0;
-}
-
-static void
-free_fstring_expressions(struct tok_state *tok)
-{
-    int index;
-    tokenizer_mode *mode;
-
-    for (index = tok->tok_mode_stack_index; index >= 0; --index) {
-        mode = &(tok->tok_mode_stack[index]);
-        if (mode->last_expr_buffer != NULL) {
-            PyMem_Free(mode->last_expr_buffer);
-            mode->last_expr_buffer = NULL;
-            mode->last_expr_size = 0;
-            mode->last_expr_end = -1;
-        }
-    }
-}
-
-/* Read a line of text from TOK into S, using the stream in TOK.
-   Return NULL on failure, else S.
-
-   On entry, tok->decoding_buffer will be one of:
-     1) NULL: need to call tok->decoding_readline to get a new line
-     2) PyUnicodeObject *: decoding_feof has called tok->decoding_readline and
-       stored the result in tok->decoding_buffer
-     3) PyByteArrayObject *: previous call to tok_readline_recode did not have enough room
-       (in the s buffer) to copy entire contents of the line read
-       by tok->decoding_readline.  tok->decoding_buffer has the overflow.
-       In this case, tok_readline_recode is called in a loop (with an expanded buffer)
-       until the buffer ends with a '\n' (or until the end of the file is
-       reached): see tok_nextc and its calls to tok_reserve_buf.
-*/
-
-static int
-tok_reserve_buf(struct tok_state *tok, Py_ssize_t size)
-{
-    Py_ssize_t cur = tok->cur - tok->buf;
-    Py_ssize_t oldsize = tok->inp - tok->buf;
-    Py_ssize_t newsize = oldsize + Py_MAX(size, oldsize >> 1);
-    if (newsize > tok->end - tok->buf) {
-        char *newbuf = tok->buf;
-        Py_ssize_t start = tok->start == NULL ? -1 : tok->start - tok->buf;
-        Py_ssize_t line_start = tok->start == NULL ? -1 : tok->line_start - tok->buf;
-        Py_ssize_t multi_line_start = tok->multi_line_start - tok->buf;
-        remember_fstring_buffers(tok);
-        newbuf = (char *)PyMem_Realloc(newbuf, newsize);
-        if (newbuf == NULL) {
-            tok->done = E_NOMEM;
-            return 0;
-        }
-        tok->buf = newbuf;
-        tok->cur = tok->buf + cur;
-        tok->inp = tok->buf + oldsize;
-        tok->end = tok->buf + newsize;
-        tok->start = start < 0 ? NULL : tok->buf + start;
-        tok->line_start = line_start < 0 ? NULL : tok->buf + line_start;
-        tok->multi_line_start = multi_line_start < 0 ? NULL : tok->buf + multi_line_start;
-        restore_fstring_buffers(tok);
-    }
-    return 1;
-}
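
tok_reserve_buf() grows the buffer by at least half its current size, which keeps repeated appends amortized O(1), and it rebases every live pointer by saving offsets before realloc and restoring them afterwards, since realloc may move the block. A standalone sketch of that save/realloc/restore pattern, with a hypothetical reduced struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        char *start, *cur, *end;   /* end points one past the allocation */
    };

    static int reserve(struct buf *b, size_t need) {
        size_t used = (size_t)(b->cur - b->start);
        size_t cap = (size_t)(b->end - b->start);
        if (used + need <= cap) {
            return 1;                        /* enough room already */
        }
        size_t grow = used / 2;
        size_t newcap = used + (need > grow ? need : grow);
        size_t cur_off = used;               /* save offsets, not pointers */
        char *p = realloc(b->start, newcap);
        if (p == NULL) {
            return 0;
        }
        b->start = p;                        /* restore from saved offsets */
        b->cur = p + cur_off;
        b->end = p + newcap;
        return 1;
    }

    int main(void) {
        struct buf b;
        b.start = malloc(4);
        if (b.start == NULL) {
            return 1;
        }
        b.cur = b.start;
        b.end = b.start + 4;
        for (int i = 0; i < 1000; i++) {     /* append far past the initial cap */
            if (!reserve(&b, 1)) {
                return 1;
            }
            *b.cur++ = 'a';
        }
        printf("capacity now %zu\n", (size_t)(b.end - b.start));
        free(b.start);
        return 0;
    }
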
-
-static inline int
-contains_null_bytes(const char* str, size_t size) {
-    return memchr(str, 0, size) != NULL;
-}
-
-static int
-tok_readline_recode(struct tok_state *tok) {
-    PyObject *line;
-    const char *buf;
-    Py_ssize_t buflen;
-    line = tok->decoding_buffer;
-    if (line == NULL) {
-        line = PyObject_CallNoArgs(tok->decoding_readline);
-        if (line == NULL) {
-            error_ret(tok);
-            goto error;
-        }
-    }
-    else {
-        tok->decoding_buffer = NULL;
-    }
-    buf = PyUnicode_AsUTF8AndSize(line, &buflen);
-    if (buf == NULL) {
-        error_ret(tok);
-        goto error;
-    }
-    // Make room for the null terminator *and* potentially
-    // an extra newline character that we may need to artificially
-    // add.
-    size_t buffer_size = buflen + 2;
-    if (!tok_reserve_buf(tok, buffer_size)) {
-        goto error;
-    }
-    memcpy(tok->inp, buf, buflen);
-    tok->inp += buflen;
-    *tok->inp = '\0';
-    if (tok->fp_interactive &&
-        tok_concatenate_interactive_new_line(tok, buf) == -1) {
-        goto error;
-    }
-    Py_DECREF(line);
-    return 1;
-error:
-    Py_XDECREF(line);
-    return 0;
-}
-
-/* Set the readline function for TOK to a StreamReader's
-   readline function. The StreamReader is named ENC.
-
-   This function is called from check_bom and check_coding_spec.
-
-   ENC is usually identical to the future value of tok->encoding,
-   except for the (currently unsupported) case of UTF-16.
-
-   Return 1 on success, 0 on failure. */
-
-static int
-fp_setreadl(struct tok_state *tok, const char* enc)
-{
-    PyObject *readline, *open, *stream;
-    int fd;
-    long pos;
-
-    fd = fileno(tok->fp);
-    /* Due to buffering the file offset for fd can be different from the file
-     * position of tok->fp.  If tok->fp was opened in text mode on Windows,
-     * its file position counts CRLF as one char and can't be directly mapped
-     * to the file offset for fd.  Instead we step back one byte and read to
-     * the end of line.*/
-    pos = ftell(tok->fp);
-    if (pos == -1 ||
-        lseek(fd, (off_t)(pos > 0 ? pos - 1 : pos), SEEK_SET) == (off_t)-1) {
-        PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL);
-        return 0;
-    }
-
-    open = _PyImport_GetModuleAttrString("io", "open");
-    if (open == NULL) {
-        return 0;
-    }
-    stream = PyObject_CallFunction(open, "isisOOO",
-                    fd, "r", -1, enc, Py_None, Py_None, Py_False);
-    Py_DECREF(open);
-    if (stream == NULL) {
-        return 0;
-    }
-
-    readline = PyObject_GetAttr(stream, &_Py_ID(readline));
-    Py_DECREF(stream);
-    if (readline == NULL) {
-        return 0;
-    }
-    Py_XSETREF(tok->decoding_readline, readline);
-
-    if (pos > 0) {
-        PyObject *bufobj = _PyObject_CallNoArgs(readline);
-        if (bufobj == NULL) {
-            return 0;
-        }
-        Py_DECREF(bufobj);
-    }
-
-    return 1;
-}
-
-/* Fetch the next byte from TOK. */
-
-static int fp_getc(struct tok_state *tok) {
-    return getc(tok->fp);
-}
-
-/* Unfetch the last byte back into TOK.  */
-
-static void fp_ungetc(int c, struct tok_state *tok) {
-    ungetc(c, tok->fp);
-}
-
-/* Check whether the characters at s start a valid
-   UTF-8 sequence. Return the number of characters forming
-   the sequence if yes, 0 if not.  The special cases match
-   those in stringlib/codecs.h:utf8_decode.
-*/
-static int
-valid_utf8(const unsigned char* s)
-{
-    int expected = 0;
-    int length;
-    if (*s < 0x80) {
-        /* single-byte code */
-        return 1;
-    }
-    else if (*s < 0xE0) {
-        /* \xC2\x80-\xDF\xBF -- 0080-07FF */
-        if (*s < 0xC2) {
-            /* invalid sequence
-               \x80-\xBF -- continuation byte
-               \xC0-\xC1 -- fake 0000-007F */
-            return 0;
-        }
-        expected = 1;
-    }
-    else if (*s < 0xF0) {
-        /* \xE0\xA0\x80-\xEF\xBF\xBF -- 0800-FFFF */
-        if (*s == 0xE0 && *(s + 1) < 0xA0) {
-            /* invalid sequence
-               \xE0\x80\x80-\xE0\x9F\xBF -- fake 0000-0800 */
-            return 0;
-        }
-        else if (*s == 0xED && *(s + 1) >= 0xA0) {
-            /* Decoding UTF-8 sequences in range \xED\xA0\x80-\xED\xBF\xBF
-               will result in surrogates in range D800-DFFF. Surrogates are
-               not valid UTF-8 so they are rejected.
-               See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
-               (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt */
-            return 0;
-        }
-        expected = 2;
-    }
-    else if (*s < 0xF5) {
-        /* \xF0\x90\x80\x80-\xF4\x8F\xBF\xBF -- 10000-10FFFF */
-        if (*(s + 1) < 0x90 ? *s == 0xF0 : *s == 0xF4) {
-            /* invalid sequence -- one of:
-               \xF0\x80\x80\x80-\xF0\x8F\xBF\xBF -- fake 0000-FFFF
-               \xF4\x90\x80\x80- -- 110000- overflow */
-            return 0;
-        }
-        expected = 3;
-    }
-    else {
-        /* invalid start byte */
-        return 0;
-    }
-    length = expected + 1;
-    for (; expected; expected--)
-        if (s[expected] < 0x80 || s[expected] >= 0xC0)
-            return 0;
-    return length;
-}
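
valid_utf8() rejects byte runs whose bit patterns look plausible but decode to something illegal: overlong encodings (C0/C1, E0 80-9F, F0 80-8F), surrogates (ED A0-BF), and anything above U+10FFFF. A standalone restatement of those rules with a small driver:

    #include <stdio.h>

    /* Return the length of a valid UTF-8 sequence starting at s, or 0. */
    static int utf8_len(const unsigned char *s) {
        int expected;
        if (s[0] < 0x80) return 1;                  /* ASCII */
        else if (s[0] < 0xC2) return 0;             /* continuation/overlong */
        else if (s[0] < 0xE0) expected = 1;         /* 2-byte sequence */
        else if (s[0] < 0xF0) {                     /* 3-byte sequence */
            if (s[0] == 0xE0 && s[1] < 0xA0) return 0;   /* overlong */
            if (s[0] == 0xED && s[1] >= 0xA0) return 0;  /* surrogate */
            expected = 2;
        }
        else if (s[0] < 0xF5) {                     /* 4-byte sequence */
            if (s[1] < 0x90 ? s[0] == 0xF0 : s[0] == 0xF4) {
                return 0;                           /* overlong or > 10FFFF */
            }
            expected = 3;
        }
        else return 0;                              /* invalid start byte */
        for (int i = 1; i <= expected; i++) {
            if (s[i] < 0x80 || s[i] >= 0xC0) return 0;  /* not a continuation */
        }
        return expected + 1;
    }

    int main(void) {
        const unsigned char euro[] = { 0xE2, 0x82, 0xAC, 0 };  /* U+20AC */
        const unsigned char surr[] = { 0xED, 0xA0, 0x80, 0 };  /* U+D800 */
        const unsigned char over[] = { 0xC0, 0xAF, 0 };        /* overlong '/' */
        printf("%d %d %d\n", utf8_len(euro), utf8_len(surr), utf8_len(over));
        /* prints: 3 0 0 */
        return 0;
    }
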
-
-static int
-ensure_utf8(char *line, struct tok_state *tok)
-{
-    int badchar = 0;
-    unsigned char *c;
-    int length;
-    for (c = (unsigned char *)line; *c; c += length) {
-        if (!(length = valid_utf8(c))) {
-            badchar = *c;
-            break;
-        }
-    }
-    if (badchar) {
-        PyErr_Format(PyExc_SyntaxError,
-                     "Non-UTF-8 code starting with '\\x%.2x' "
-                     "in file %U on line %i, "
-                     "but no encoding declared; "
-                     "see https://peps.python.org/pep-0263/ for details",
-                     badchar, tok->filename, tok->lineno);
-        return 0;
-    }
-    return 1;
-}
-
-/* Fetch a byte from TOK, using the string buffer. */
-
-static int
-buf_getc(struct tok_state *tok) {
-    return Py_CHARMASK(*tok->str++);
-}
-
-/* Unfetch a byte from TOK, using the string buffer. */
-
-static void
-buf_ungetc(int c, struct tok_state *tok) {
-    tok->str--;
-    assert(Py_CHARMASK(*tok->str) == c);        /* tok->cur may point to read-only segment */
-}
-
-/* Set the readline function for TOK to ENC. For the string-based
-   tokenizer, this means to just record the encoding. */
-
-static int
-buf_setreadl(struct tok_state *tok, const char* enc) {
-    tok->enc = enc;
-    return 1;
-}
-
-/* Return a UTF-8 encoding Python string object from the
-   C byte string STR, which is encoded with ENC. */
-
-static PyObject *
-translate_into_utf8(const char* str, const char* enc) {
-    PyObject *utf8;
-    PyObject* buf = PyUnicode_Decode(str, strlen(str), enc, NULL);
-    if (buf == NULL)
-        return NULL;
-    utf8 = PyUnicode_AsUTF8String(buf);
-    Py_DECREF(buf);
-    return utf8;
-}
-
-
-static char *
-translate_newlines(const char *s, int exec_input, int preserve_crlf,
-                   struct tok_state *tok) {
-    int skip_next_lf = 0;
-    size_t needed_length = strlen(s) + 2, final_length;
-    char *buf, *current;
-    char c = '\0';
-    buf = PyMem_Malloc(needed_length);
-    if (buf == NULL) {
-        tok->done = E_NOMEM;
-        return NULL;
-    }
-    for (current = buf; *s; s++, current++) {
-        c = *s;
-        if (skip_next_lf) {
-            skip_next_lf = 0;
-            if (c == '\n') {
-                c = *++s;
-                if (!c)
-                    break;
-            }
-        }
-        if (!preserve_crlf && c == '\r') {
-            skip_next_lf = 1;
-            c = '\n';
-        }
-        *current = c;
-    }
-    /* If this is exec input, add a newline to the end of the string if
-       there isn't one already. */
-    if (exec_input && c != '\n' && c != '\0') {
-        *current = '\n';
-        current++;
-    }
-    *current = '\0';
-    final_length = current - buf + 1;
-    if (final_length < needed_length && final_length) {
-        /* should never fail */
-        char* result = PyMem_Realloc(buf, final_length);
-        if (result == NULL) {
-            PyMem_Free(buf);
-        }
-        buf = result;
-    }
-    return buf;
-}
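
translate_newlines() normalizes line endings in one pass: CRLF collapses to LF, a bare CR becomes LF, and for exec input a missing final newline is appended, which is why the output buffer needs one extra byte plus the terminator. A standalone sketch of the same pass:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *normalize_newlines(const char *s, int exec_input) {
        char *buf = malloc(strlen(s) + 2);  /* +1 possible '\n', +1 NUL */
        if (buf == NULL) return NULL;
        char *out = buf;
        char c = '\0';
        for (; *s; s++) {
            c = *s;
            if (c == '\r') {
                if (s[1] == '\n') s++;      /* collapse CRLF */
                c = '\n';
            }
            *out++ = c;
        }
        if (exec_input && c != '\n' && c != '\0') {
            *out++ = '\n';                  /* fake a trailing newline */
        }
        *out = '\0';
        return buf;
    }

    int main(void) {
        char *r = normalize_newlines("a = 1\r\nb = 2\rc = 3", 1);
        if (r == NULL) return 1;
        printf("%s", r);    /* "a = 1\nb = 2\nc = 3\n" */
        free(r);
        return 0;
    }
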
-
-/* Decode a byte string STR for use as the buffer of TOK.
-   Look for encoding declarations inside STR, and record them
-   inside TOK.  */
-
-static char *
-decode_str(const char *input, int single, struct tok_state *tok, int preserve_crlf)
-{
-    PyObject* utf8 = NULL;
-    char *str;
-    const char *s;
-    const char *newl[2] = {NULL, NULL};
-    int lineno = 0;
-    tok->input = str = translate_newlines(input, single, preserve_crlf, tok);
-    if (str == NULL)
-        return NULL;
-    tok->enc = NULL;
-    tok->str = str;
-    if (!check_bom(buf_getc, buf_ungetc, buf_setreadl, tok))
-        return error_ret(tok);
-    str = tok->str;             /* string after BOM if any */
-    assert(str);
-    if (tok->enc != NULL) {
-        utf8 = translate_into_utf8(str, tok->enc);
-        if (utf8 == NULL)
-            return error_ret(tok);
-        str = PyBytes_AsString(utf8);
-    }
-    for (s = str;; s++) {
-        if (*s == '\0') break;
-        else if (*s == '\n') {
-            assert(lineno < 2);
-            newl[lineno] = s;
-            lineno++;
-            if (lineno == 2) break;
-        }
-    }
-    tok->enc = NULL;
-    /* need to check line 1 and 2 separately since check_coding_spec
-       assumes a single line as input */
-    if (newl[0]) {
-        if (!check_coding_spec(str, newl[0] - str, tok, buf_setreadl)) {
-            return NULL;
-        }
-        if (tok->enc == NULL && tok->decoding_state != STATE_NORMAL && newl[1]) {
-            if (!check_coding_spec(newl[0]+1, newl[1] - newl[0],
-                                   tok, buf_setreadl))
-                return NULL;
-        }
-    }
-    if (tok->enc != NULL) {
-        assert(utf8 == NULL);
-        utf8 = translate_into_utf8(str, tok->enc);
-        if (utf8 == NULL)
-            return error_ret(tok);
-        str = PyBytes_AS_STRING(utf8);
-    }
-    assert(tok->decoding_buffer == NULL);
-    tok->decoding_buffer = utf8; /* CAUTION */
-    return str;
-}
-
-/* Set up tokenizer for string */
-
-struct tok_state *
-_PyTokenizer_FromString(const char *str, int exec_input, int preserve_crlf)
-{
-    struct tok_state *tok = tok_new();
-    char *decoded;
-
-    if (tok == NULL)
-        return NULL;
-    decoded = decode_str(str, exec_input, tok, preserve_crlf);
-    if (decoded == NULL) {
-        _PyTokenizer_Free(tok);
-        return NULL;
-    }
-
-    tok->buf = tok->cur = tok->inp = decoded;
-    tok->end = decoded;
-    return tok;
-}
-
-struct tok_state *
-_PyTokenizer_FromReadline(PyObject* readline, const char* enc,
-                          int exec_input, int preserve_crlf)
-{
-    struct tok_state *tok = tok_new();
-    if (tok == NULL)
-        return NULL;
-    if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
-        _PyTokenizer_Free(tok);
-        return NULL;
-    }
-    tok->cur = tok->inp = tok->buf;
-    tok->end = tok->buf + BUFSIZ;
-    tok->fp = NULL;
-    if (enc != NULL) {
-        tok->encoding = new_string(enc, strlen(enc), tok);
-        if (!tok->encoding) {
-            _PyTokenizer_Free(tok);
-            return NULL;
-        }
-    }
-    tok->decoding_state = STATE_NORMAL;
-    Py_INCREF(readline);
-    tok->readline = readline;
-    return tok;
-}
-
-/* Set up tokenizer for UTF-8 string */
-
-struct tok_state *
-_PyTokenizer_FromUTF8(const char *str, int exec_input, int preserve_crlf)
-{
-    struct tok_state *tok = tok_new();
-    char *translated;
-    if (tok == NULL)
-        return NULL;
-    tok->input = translated = translate_newlines(str, exec_input, preserve_crlf, tok);
-    if (translated == NULL) {
-        _PyTokenizer_Free(tok);
-        return NULL;
-    }
-    tok->decoding_state = STATE_NORMAL;
-    tok->enc = NULL;
-    tok->str = translated;
-    tok->encoding = new_string("utf-8", 5, tok);
-    if (!tok->encoding) {
-        _PyTokenizer_Free(tok);
-        return NULL;
-    }
-
-    tok->buf = tok->cur = tok->inp = translated;
-    tok->end = translated;
-    return tok;
-}
-
-/* Set up tokenizer for file */
-
-struct tok_state *
-_PyTokenizer_FromFile(FILE *fp, const char* enc,
-                      const char *ps1, const char *ps2)
-{
-    struct tok_state *tok = tok_new();
-    if (tok == NULL)
-        return NULL;
-    if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
-        _PyTokenizer_Free(tok);
-        return NULL;
-    }
-    tok->cur = tok->inp = tok->buf;
-    tok->end = tok->buf + BUFSIZ;
-    tok->fp = fp;
-    tok->prompt = ps1;
-    tok->nextprompt = ps2;
-    if (enc != NULL) {
-        /* Must copy encoding declaration since it
-           gets copied into the parse tree. */
-        tok->encoding = new_string(enc, strlen(enc), tok);
-        if (!tok->encoding) {
-            _PyTokenizer_Free(tok);
-            return NULL;
-        }
-        tok->decoding_state = STATE_NORMAL;
-    }
-    return tok;
-}
-
-/* Free a tok_state structure */
-
-void
-_PyTokenizer_Free(struct tok_state *tok)
-{
-    if (tok->encoding != NULL) {
-        PyMem_Free(tok->encoding);
-    }
-    Py_XDECREF(tok->decoding_readline);
-    Py_XDECREF(tok->decoding_buffer);
-    Py_XDECREF(tok->readline);
-    Py_XDECREF(tok->filename);
-    if ((tok->readline != NULL || tok->fp != NULL ) && tok->buf != NULL) {
-        PyMem_Free(tok->buf);
-    }
-    if (tok->input) {
-        PyMem_Free(tok->input);
-    }
-    if (tok->interactive_src_start != NULL) {
-        PyMem_Free(tok->interactive_src_start);
-    }
-    free_fstring_expressions(tok);
-    PyMem_Free(tok);
-}
-
-void
-_PyToken_Free(struct token *token) {
-    Py_XDECREF(token->metadata);
-}
-
-void
-_PyToken_Init(struct token *token) {
-    token->metadata = NULL;
-}
-
-static int
-tok_readline_raw(struct tok_state *tok)
-{
-    do {
-        if (!tok_reserve_buf(tok, BUFSIZ)) {
-            return 0;
-        }
-        int n_chars = (int)(tok->end - tok->inp);
-        size_t line_size = 0;
-        char *line = _Py_UniversalNewlineFgetsWithSize(tok->inp, n_chars, tok->fp, NULL, &line_size);
-        if (line == NULL) {
-            return 1;
-        }
-        if (tok->fp_interactive &&
-            tok_concatenate_interactive_new_line(tok, line) == -1) {
-            return 0;
-        }
-        tok->inp += line_size;
-        if (tok->inp == tok->buf) {
-            return 0;
-        }
-    } while (tok->inp[-1] != '\n');
-    return 1;
-}
-
-static int
-tok_readline_string(struct tok_state* tok) {
-    PyObject* line = NULL;
-    PyObject* raw_line = PyObject_CallNoArgs(tok->readline);
-    if (raw_line == NULL) {
-        if (PyErr_ExceptionMatches(PyExc_StopIteration)) {
-            PyErr_Clear();
-            return 1;
-        }
-        error_ret(tok);
-        goto error;
-    }
-    if (tok->encoding != NULL) {
-        if (!PyBytes_Check(raw_line)) {
-            PyErr_Format(PyExc_TypeError, "readline() returned a non-bytes object");
-            error_ret(tok);
-            goto error;
-        }
-        line = PyUnicode_Decode(PyBytes_AS_STRING(raw_line), PyBytes_GET_SIZE(raw_line),
-                                tok->encoding, "replace");
-        Py_CLEAR(raw_line);
-        if (line == NULL) {
-            error_ret(tok);
-            goto error;
-        }
-    } else {
-        if (!PyUnicode_Check(raw_line)) {
-            PyErr_Format(PyExc_TypeError, "readline() returned a non-string object");
-            error_ret(tok);
-            goto error;
-        }
-        line = raw_line;
-        raw_line = NULL;
-    }
-    Py_ssize_t buflen;
-    const char* buf = PyUnicode_AsUTF8AndSize(line, &buflen);
-    if (buf == NULL) {
-        error_ret(tok);
-        goto error;
-    }
-
-    // Make room for the null terminator *and* potentially
-    // an extra newline character that we may need to artificially
-    // add.
-    size_t buffer_size = buflen + 2;
-    if (!tok_reserve_buf(tok, buffer_size)) {
-        goto error;
-    }
-    memcpy(tok->inp, buf, buflen);
-    tok->inp += buflen;
-    *tok->inp = '\0';
-
-    tok->line_start = tok->cur;
-    Py_DECREF(line);
-    return 1;
-error:
-    Py_XDECREF(raw_line);
-    Py_XDECREF(line);
-    return 0;
-}
-
-static int
-tok_underflow_string(struct tok_state *tok) {
-    char *end = strchr(tok->inp, '\n');
-    if (end != NULL) {
-        end++;
-    }
-    else {
-        end = strchr(tok->inp, '\0');
-        if (end == tok->inp) {
-            tok->done = E_EOF;
-            return 0;
-        }
-    }
-    if (tok->start == NULL) {
-        tok->buf = tok->cur;
-    }
-    tok->line_start = tok->cur;
-    ADVANCE_LINENO();
-    tok->inp = end;
-    return 1;
-}
-
-static int
-tok_underflow_interactive(struct tok_state *tok) {
-    if (tok->interactive_underflow == IUNDERFLOW_STOP) {
-        tok->done = E_INTERACT_STOP;
-        return 1;
-    }
-    char *newtok = PyOS_Readline(tok->fp ? tok->fp : stdin, stdout, tok->prompt);
-    if (newtok != NULL) {
-        char *translated = translate_newlines(newtok, 0, 0, tok);
-        PyMem_Free(newtok);
-        if (translated == NULL) {
-            return 0;
-        }
-        newtok = translated;
-    }
-    if (tok->encoding && newtok && *newtok) {
-        /* Recode to UTF-8 */
-        Py_ssize_t buflen;
-        const char* buf;
-        PyObject *u = translate_into_utf8(newtok, tok->encoding);
-        PyMem_Free(newtok);
-        if (u == NULL) {
-            tok->done = E_DECODE;
-            return 0;
-        }
-        buflen = PyBytes_GET_SIZE(u);
-        buf = PyBytes_AS_STRING(u);
-        newtok = PyMem_Malloc(buflen+1);
-        if (newtok == NULL) {
-            Py_DECREF(u);
-            tok->done = E_NOMEM;
-            return 0;
-        }
-        strcpy(newtok, buf);
-        Py_DECREF(u);
-    }
-    if (tok->fp_interactive &&
-        tok_concatenate_interactive_new_line(tok, newtok) == -1) {
-        PyMem_Free(newtok);
-        return 0;
-    }
-    if (tok->nextprompt != NULL) {
-        tok->prompt = tok->nextprompt;
-    }
-    if (newtok == NULL) {
-        tok->done = E_INTR;
-    }
-    else if (*newtok == '\0') {
-        PyMem_Free(newtok);
-        tok->done = E_EOF;
-    }
-    else if (tok->start != NULL) {
-        Py_ssize_t cur_multi_line_start = tok->multi_line_start - tok->buf;
-        remember_fstring_buffers(tok);
-        size_t size = strlen(newtok);
-        ADVANCE_LINENO();
-        if (!tok_reserve_buf(tok, size + 1)) {
-            PyMem_Free(tok->buf);
-            tok->buf = NULL;
-            PyMem_Free(newtok);
-            return 0;
-        }
-        memcpy(tok->cur, newtok, size + 1);
-        PyMem_Free(newtok);
-        tok->inp += size;
-        tok->multi_line_start = tok->buf + cur_multi_line_start;
-        restore_fstring_buffers(tok);
-    }
-    else {
-        remember_fstring_buffers(tok);
-        ADVANCE_LINENO();
-        PyMem_Free(tok->buf);
-        tok->buf = newtok;
-        tok->cur = tok->buf;
-        tok->line_start = tok->buf;
-        tok->inp = strchr(tok->buf, '\0');
-        tok->end = tok->inp + 1;
-        restore_fstring_buffers(tok);
-    }
-    if (tok->done != E_OK) {
-        if (tok->prompt != NULL) {
-            PySys_WriteStderr("\n");
-        }
-        return 0;
-    }
-
-    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
-        return 0;
-    }
-    return 1;
-}
-
-static int
-tok_underflow_file(struct tok_state *tok) {
-    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
-        tok->cur = tok->inp = tok->buf;
-    }
-    if (tok->decoding_state == STATE_INIT) {
-        /* We have not yet determined the encoding.
-           If an encoding is found, use the file-pointer
-           reader functions from now on. */
-        if (!check_bom(fp_getc, fp_ungetc, fp_setreadl, tok)) {
-            error_ret(tok);
-            return 0;
-        }
-        assert(tok->decoding_state != STATE_INIT);
-    }
-    /* Read until '\n' or EOF */
-    if (tok->decoding_readline != NULL) {
-        /* We already have a codec associated with this input. */
-        if (!tok_readline_recode(tok)) {
-            return 0;
-        }
-    }
-    else {
-        /* We want a 'raw' read. */
-        if (!tok_readline_raw(tok)) {
-            return 0;
-        }
-    }
-    if (tok->inp == tok->cur) {
-        tok->done = E_EOF;
-        return 0;
-    }
-    tok->implicit_newline = 0;
-    if (tok->inp[-1] != '\n') {
-        assert(tok->inp + 1 < tok->end);
-        /* Last line does not end in \n, fake one */
-        *tok->inp++ = '\n';
-        *tok->inp = '\0';
-        tok->implicit_newline = 1;
-    }
-
-    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
-        return 0;
-    }
-
-    ADVANCE_LINENO();
-    if (tok->decoding_state != STATE_NORMAL) {
-        if (tok->lineno > 2) {
-            tok->decoding_state = STATE_NORMAL;
-        }
-        else if (!check_coding_spec(tok->cur, strlen(tok->cur),
-                                    tok, fp_setreadl))
-        {
-            return 0;
-        }
-    }
-    /* The default encoding is UTF-8, so make sure we don't have any
-       non-UTF-8 sequences in it. */
-    if (!tok->encoding && !ensure_utf8(tok->cur, tok)) {
-        error_ret(tok);
-        return 0;
-    }
-    assert(tok->done == E_OK);
-    return tok->done == E_OK;
-}
-
-static int
-tok_underflow_readline(struct tok_state* tok) {
-    assert(tok->decoding_state == STATE_NORMAL);
-    assert(tok->fp == NULL && tok->input == NULL && tok->decoding_readline == NULL);
-    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
-        tok->cur = tok->inp = tok->buf;
-    }
-    if (!tok_readline_string(tok)) {
-        return 0;
-    }
-    if (tok->inp == tok->cur) {
-        tok->done = E_EOF;
-        return 0;
-    }
-    tok->implicit_newline = 0;
-    if (tok->inp[-1] != '\n') {
-        assert(tok->inp + 1 < tok->end);
-        /* Last line does not end in \n, fake one */
-        *tok->inp++ = '\n';
-        *tok->inp = '\0';
-        tok->implicit_newline = 1;
-    }
-
-    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
-        return 0;
-    }
-
-    ADVANCE_LINENO();
-    /* The default encoding is UTF-8, so make sure we don't have any
-       non-UTF-8 sequences in it. */
-    if (!tok->encoding && !ensure_utf8(tok->cur, tok)) {
-        error_ret(tok);
-        return 0;
-    }
-    assert(tok->done == E_OK);
-    return tok->done == E_OK;
-}
-
-#if defined(Py_DEBUG)
-static void
-print_escape(FILE *f, const char *s, Py_ssize_t size)
-{
-    if (s == NULL) {
-        fputs("NULL", f);
-        return;
-    }
-    putc('"', f);
-    while (size-- > 0) {
-        unsigned char c = *s++;
-        switch (c) {
-            case '\n': fputs("\\n", f); break;
-            case '\r': fputs("\\r", f); break;
-            case '\t': fputs("\\t", f); break;
-            case '\f': fputs("\\f", f); break;
-            case '\'': fputs("\\'", f); break;
-            case '"': fputs("\\\"", f); break;
-            default:
-                if (0x20 <= c && c <= 0x7f)
-                    putc(c, f);
-                else
-                    fprintf(f, "\\x%02x", c);
-        }
-    }
-    putc('"', f);
-}
-#endif
-
-/* Get next char, updating state; error code goes into tok->done */
-
-static int
-tok_nextc(struct tok_state *tok)
-{
-    int rc;
-    for (;;) {
-        if (tok->cur != tok->inp) {
-            tok->col_offset++;
-            return Py_CHARMASK(*tok->cur++); /* Fast path */
-        }
-        if (tok->done != E_OK) {
-            return EOF;
-        }
-        if (tok->readline) {
-            rc = tok_underflow_readline(tok);
-        }
-        else if (tok->fp == NULL) {
-            rc = tok_underflow_string(tok);
-        }
-        else if (tok->prompt != NULL) {
-            rc = tok_underflow_interactive(tok);
-        }
-        else {
-            rc = tok_underflow_file(tok);
-        }
-#if defined(Py_DEBUG)
-        if (tok->debug) {
-            fprintf(stderr, "line[%d] = ", tok->lineno);
-            print_escape(stderr, tok->cur, tok->inp - tok->cur);
-            fprintf(stderr, "  tok->done = %d\n", tok->done);
-        }
-#endif
-        if (!rc) {
-            tok->cur = tok->inp;
-            return EOF;
-        }
-        tok->line_start = tok->cur;
-
-        if (contains_null_bytes(tok->line_start, tok->inp - tok->line_start)) {
-            syntaxerror(tok, "source code cannot contain null bytes");
-            tok->cur = tok->inp;
-            return EOF;
-        }
-    }
-    Py_UNREACHABLE();
-}
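
tok_nextc() is the classic buffered-reader shape: a fast path serving bytes from the [cur, inp) window, plus a per-mode underflow callback (readline, string, interactive, or file) that refills it. A minimal standalone sketch of that shape (not CPython code; a single hard-wired line-splitting underflow stands in for the four real ones):

#include <stdio.h>
#include <string.h>

struct reader {
    const char *cur, *inp;              /* window: [cur, inp) */
    const char *src;                    /* remaining source */
};

/* Refill the window with (at most) one line of input. */
static int underflow(struct reader *r) {
    if (*r->src == '\0') {
        return 0;                       /* exhausted */
    }
    const char *nl = strchr(r->src, '\n');
    size_t n = nl ? (size_t)(nl - r->src) + 1 : strlen(r->src);
    r->cur = r->src;
    r->inp = r->src + n;
    r->src += n;
    return 1;
}

static int next_char(struct reader *r) {
    for (;;) {
        if (r->cur != r->inp) {
            return (unsigned char)*r->cur++;    /* fast path */
        }
        if (!underflow(r)) {
            return EOF;                         /* out of input */
        }
    }
}

int main(void) {
    struct reader r = { NULL, NULL, "ab\ncd" };
    int c;
    while ((c = next_char(&r)) != EOF) {
        putchar(c);
    }
    putchar('\n');
    return 0;   /* prints: ab / cd */
}
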
-
-/* Back-up one character */
-
-static void
-tok_backup(struct tok_state *tok, int c)
-{
-    if (c != EOF) {
-        if (--tok->cur < tok->buf) {
-            Py_FatalError("tokenizer beginning of buffer");
-        }
-        if ((int)(unsigned char)*tok->cur != Py_CHARMASK(c)) {
-            Py_FatalError("tok_backup: wrong character");
-        }
-        tok->col_offset--;
-    }
-}
-
-static int
-_syntaxerror_range(struct tok_state *tok, const char *format,
-                   int col_offset, int end_col_offset,
-                   va_list vargs)
-{
-    // In release builds, we don't want to overwrite a previous error, but in
-    // debug builds we want to fail if that happens so we can fix it.
-    assert(tok->done != E_ERROR);
-    if (tok->done == E_ERROR) {
-        return ERRORTOKEN;
-    }
-    PyObject *errmsg, *errtext, *args;
-    errmsg = PyUnicode_FromFormatV(format, vargs);
-    if (!errmsg) {
-        goto error;
-    }
-
-    errtext = PyUnicode_DecodeUTF8(tok->line_start, tok->cur - tok->line_start,
-                                   "replace");
-    if (!errtext) {
-        goto error;
-    }
-
-    if (col_offset == -1) {
-        col_offset = (int)PyUnicode_GET_LENGTH(errtext);
-    }
-    if (end_col_offset == -1) {
-        end_col_offset = col_offset;
-    }
-
-    Py_ssize_t line_len = strcspn(tok->line_start, "\n");
-    if (line_len != tok->cur - tok->line_start) {
-        Py_DECREF(errtext);
-        errtext = PyUnicode_DecodeUTF8(tok->line_start, line_len,
-                                       "replace");
-    }
-    if (!errtext) {
-        goto error;
-    }
-
-    args = Py_BuildValue("(O(OiiNii))", errmsg, tok->filename, tok->lineno,
-                         col_offset, errtext, tok->lineno, end_col_offset);
-    if (args) {
-        PyErr_SetObject(PyExc_SyntaxError, args);
-        Py_DECREF(args);
-    }
-
-error:
-    Py_XDECREF(errmsg);
-    tok->done = E_ERROR;
-    return ERRORTOKEN;
-}
-
-static int
-syntaxerror(struct tok_state *tok, const char *format, ...)
-{
-    // These errors are cleaned on startup. TODO: Fix it.
-    va_list vargs;
-    va_start(vargs, format);
-    int ret = _syntaxerror_range(tok, format, -1, -1, vargs);
-    va_end(vargs);
-    return ret;
-}
-
-static int
-syntaxerror_known_range(struct tok_state *tok,
-                        int col_offset, int end_col_offset,
-                        const char *format, ...)
-{
-    va_list vargs;
-    va_start(vargs, format);
-    int ret = _syntaxerror_range(tok, format, col_offset, end_col_offset, vargs);
-    va_end(vargs);
-    return ret;
-}
-
-static int
-indenterror(struct tok_state *tok)
-{
-    tok->done = E_TABSPACE;
-    tok->cur = tok->inp;
-    return ERRORTOKEN;
-}
-
-static int
-parser_warn(struct tok_state *tok, PyObject *category, const char *format, ...)
-{
-    if (!tok->report_warnings) {
-        return 0;
-    }
-
-    PyObject *errmsg;
-    va_list vargs;
-    va_start(vargs, format);
-    errmsg = PyUnicode_FromFormatV(format, vargs);
-    va_end(vargs);
-    if (!errmsg) {
-        goto error;
-    }
-
-    if (PyErr_WarnExplicitObject(category, errmsg, tok->filename,
-                                 tok->lineno, NULL, NULL) < 0) {
-        if (PyErr_ExceptionMatches(category)) {
-            /* Replace the DeprecationWarning exception with a SyntaxError
-               to get a more accurate error report */
-            PyErr_Clear();
-            syntaxerror(tok, "%U", errmsg);
-        }
-        goto error;
-    }
-    Py_DECREF(errmsg);
-    return 0;
-
-error:
-    Py_XDECREF(errmsg);
-    tok->done = E_ERROR;
-    return -1;
-}
-
-static int
-warn_invalid_escape_sequence(struct tok_state *tok, int first_invalid_escape_char)
-{
-    if (!tok->report_warnings) {
-        return 0;
-    }
-
-    PyObject *msg = PyUnicode_FromFormat(
-        "invalid escape sequence '\\%c'",
-        (char) first_invalid_escape_char
-    );
-
-    if (msg == NULL) {
-        return -1;
-    }
-
-    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, tok->filename,
-                                 tok->lineno, NULL, NULL) < 0) {
-        Py_DECREF(msg);
-
-        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
-            /* Replace the SyntaxWarning exception with a SyntaxError
-               to get a more accurate error report */
-            PyErr_Clear();
-            return syntaxerror(tok, "invalid escape sequence '\\%c'", (char) first_invalid_escape_char);
-        }
-
-        return -1;
-    }
-
-    Py_DECREF(msg);
-    return 0;
-}
-
-static int
-lookahead(struct tok_state *tok, const char *test)
-{
-    const char *s = test;
-    int res = 0;
-    while (1) {
-        int c = tok_nextc(tok);
-        if (*s == 0) {
-            res = !is_potential_identifier_char(c);
-        }
-        else if (c == *s) {
-            s++;
-            continue;
-        }
-
-        tok_backup(tok, c);
-        while (s != test) {
-            tok_backup(tok, *--s);
-        }
-        return res;
-    }
-}
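
lookahead() speculatively consumes characters and, whatever the outcome, rewinds by backing them up in reverse order, so the caller's position is untouched. A standalone sketch of the same pattern over a flat buffer (not CPython code; the identifier test is reduced to lowercase letters and '_' for brevity):

#include <stdio.h>

struct mini_tok { const char *cur; const char *buf; };

static int mini_nextc(struct mini_tok *t) {
    return *t->cur != '\0' ? (unsigned char)*t->cur++ : EOF;
}

static void mini_backup(struct mini_tok *t, int c) {
    if (c != EOF && t->cur > t->buf) {
        t->cur--;                 /* caller backs up what it just read */
    }
}

/* Return 1 iff `test` follows, ending at a non-identifier char;
   always restore the cursor before returning. */
static int mini_lookahead(struct mini_tok *t, const char *test) {
    const char *s = test;
    int res = 0;
    for (;;) {
        int c = mini_nextc(t);
        if (*s == '\0') {
            res = !(c == '_' || (c >= 'a' && c <= 'z'));
        }
        else if (c == *s) {
            s++;
            continue;
        }
        mini_backup(t, c);
        while (s != test) {       /* rewind the matched prefix */
            mini_backup(t, (unsigned char)*--s);
        }
        return res;
    }
}

int main(void) {
    const char src[] = "in x";
    struct mini_tok t = { src, src };
    printf("%d\n", mini_lookahead(&t, "in"));  /* 1: keyword boundary */
    printf("%c\n", (char)mini_nextc(&t));      /* 'i': cursor restored */
    return 0;
}
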
-
-static int
-verify_end_of_number(struct tok_state *tok, int c, const char *kind) {
-    if (tok->tok_extra_tokens) {
-        // When we are parsing extra tokens, we don't want to emit warnings
-        // about invalid literals, because we want to be a bit more liberal.
-        return 1;
-    }
-    /* Emit a deprecation warning only if the numeric literal is immediately
-     * followed by one of the keywords that can occur after a numeric literal
-     * in valid code: "and", "else", "for", "if", "in", "is" and "or".
-     * This allows existing valid code to be deprecated gradually, without
-     * adding a warning before the error in most cases of an invalid numeric
-     * literal (which would be confusing and break existing tests).
-     * Raise a syntax error with a slightly better message than plain
-     * "invalid syntax" if the numeric literal is immediately followed by
-     * another keyword or identifier.
-     */
-    int r = 0;
-    if (c == 'a') {
-        r = lookahead(tok, "nd");
-    }
-    else if (c == 'e') {
-        r = lookahead(tok, "lse");
-    }
-    else if (c == 'f') {
-        r = lookahead(tok, "or");
-    }
-    else if (c == 'i') {
-        int c2 = tok_nextc(tok);
-        if (c2 == 'f' || c2 == 'n' || c2 == 's') {
-            r = 1;
-        }
-        tok_backup(tok, c2);
-    }
-    else if (c == 'o') {
-        r = lookahead(tok, "r");
-    }
-    else if (c == 'n') {
-        r = lookahead(tok, "ot");
-    }
-    if (r) {
-        tok_backup(tok, c);
-        if (parser_warn(tok, PyExc_SyntaxWarning,
-                "invalid %s literal", kind))
-        {
-            return 0;
-        }
-        tok_nextc(tok);
-    }
-    else /* In future releases, only the error will remain. */
-    if (c < 128 && is_potential_identifier_char(c)) {
-        tok_backup(tok, c);
-        syntaxerror(tok, "invalid %s literal", kind);
-        return 0;
-    }
-    return 1;
-}
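
Concretely: a numeric literal glued to one of the listed keywords still tokenizes, with a SyntaxWarning, while a literal glued to any other identifier is a hard error. A hypothetical embedding sketch exercising both paths (assumes a regular CPython build linked against libpython; PyRun_SimpleString prints the warning or traceback itself):

#include <Python.h>

int main(void) {
    Py_Initialize();
    /* Keyword case: accepted, but emits "SyntaxWarning: invalid
       decimal literal" because 'else' directly follows the number. */
    PyRun_SimpleString("x = 0 if 1else 2");
    /* Identifier case: rejected with "SyntaxError: invalid decimal
       literal" because 'px' directly follows the number. */
    PyRun_SimpleString("y = 100px");
    return Py_FinalizeEx() < 0 ? 1 : 0;
}
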
-
-/* Verify that the identifier follows PEP 3131.
-   All identifier strings are guaranteed to be "ready" unicode objects.
- */
-static int
-verify_identifier(struct tok_state *tok)
-{
-    if (tok->tok_extra_tokens) {
-        return 1;
-    }
-    PyObject *s;
-    if (tok->decoding_erred)
-        return 0;
-    s = PyUnicode_DecodeUTF8(tok->start, tok->cur - tok->start, NULL);
-    if (s == NULL) {
-        if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
-            tok->done = E_DECODE;
-        }
-        else {
-            tok->done = E_ERROR;
-        }
-        return 0;
-    }
-    Py_ssize_t invalid = _PyUnicode_ScanIdentifier(s);
-    if (invalid < 0) {
-        Py_DECREF(s);
-        tok->done = E_ERROR;
-        return 0;
-    }
-    assert(PyUnicode_GET_LENGTH(s) > 0);
-    if (invalid < PyUnicode_GET_LENGTH(s)) {
-        Py_UCS4 ch = PyUnicode_READ_CHAR(s, invalid);
-        if (invalid + 1 < PyUnicode_GET_LENGTH(s)) {
-            /* Determine the offset in UTF-8 encoded input */
-            Py_SETREF(s, PyUnicode_Substring(s, 0, invalid + 1));
-            if (s != NULL) {
-                Py_SETREF(s, PyUnicode_AsUTF8String(s));
-            }
-            if (s == NULL) {
-                tok->done = E_ERROR;
-                return 0;
-            }
-            tok->cur = (char *)tok->start + PyBytes_GET_SIZE(s);
-        }
-        Py_DECREF(s);
-        if (Py_UNICODE_ISPRINTABLE(ch)) {
-            syntaxerror(tok, "invalid character '%c' (U+%04X)", ch, ch);
-        }
-        else {
-            syntaxerror(tok, "invalid non-printable character U+%04X", ch);
-        }
-        return 0;
-    }
-    Py_DECREF(s);
-    return 1;
-}
-
-static int
-tok_decimal_tail(struct tok_state *tok)
-{
-    int c;
-
-    while (1) {
-        do {
-            c = tok_nextc(tok);
-        } while (Py_ISDIGIT(c));
-        if (c != '_') {
-            break;
-        }
-        c = tok_nextc(tok);
-        if (!Py_ISDIGIT(c)) {
-            tok_backup(tok, c);
-            syntaxerror(tok, "invalid decimal literal");
-            return 0;
-        }
-    }
-    return c;
-}
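
tok_decimal_tail() encodes the PEP 515 grouping rule for the digit run after the first digit: underscores may only appear singly, and only between digits. A standalone sketch of the accepted shape (not CPython code):

#include <stdio.h>
#include <ctype.h>

/* Return 1 iff `s` is digits with single, digit-flanked underscores. */
static int valid_decimal_tail(const char *s) {
    if (!isdigit((unsigned char)*s)) {
        return 0;
    }
    while (*s != '\0') {
        if (isdigit((unsigned char)*s)) {
            s++;
        }
        else if (*s == '_' && isdigit((unsigned char)s[1])) {
            s += 2;                 /* underscore must precede a digit */
        }
        else {
            return 0;
        }
    }
    return 1;
}

int main(void) {
    printf("%d %d %d\n",
           valid_decimal_tail("1_000"),    /* 1 */
           valid_decimal_tail("1__000"),   /* 0: doubled underscore */
           valid_decimal_tail("1000_"));   /* 0: trailing underscore */
    return 0;
}
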
-
-
-static inline int
-tok_continuation_line(struct tok_state *tok) {
-    int c = tok_nextc(tok);
-    if (c == '\r') {
-        c = tok_nextc(tok);
-    }
-    if (c != '\n') {
-        tok->done = E_LINECONT;
-        return -1;
-    }
-    c = tok_nextc(tok);
-    if (c == EOF) {
-        tok->done = E_EOF;
-        tok->cur = tok->inp;
-        return -1;
-    } else {
-        tok_backup(tok, c);
-    }
-    return c;
-}
-
-static int
-type_comment_token_setup(struct tok_state *tok, struct token *token, int type, int col_offset,
-                         int end_col_offset, const char *start, const char *end)
-{
-    token->level = tok->level;
-    token->lineno = token->end_lineno = tok->lineno;
-    token->col_offset = col_offset;
-    token->end_col_offset = end_col_offset;
-    token->start = start;
-    token->end = end;
-    return type;
-}
-
-static int
-token_setup(struct tok_state *tok, struct token *token, int type, const char *start, const char *end)
-{
-    assert((start == NULL && end == NULL) || (start != NULL && end != NULL));
-    token->level = tok->level;
-    if (ISSTRINGLIT(type)) {
-        token->lineno = tok->first_lineno;
-    }
-    else {
-        token->lineno = tok->lineno;
-    }
-    token->end_lineno = tok->lineno;
-    token->col_offset = token->end_col_offset = -1;
-    token->start = start;
-    token->end = end;
-
-    if (start != NULL && end != NULL) {
-        token->col_offset = tok->starting_col_offset;
-        token->end_col_offset = tok->col_offset;
-    }
-    return type;
-}
-
-
-static int
-tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
-{
-    int c;
-    int blankline, nonascii;
-
-    const char *p_start = NULL;
-    const char *p_end = NULL;
-  nextline:
-    tok->start = NULL;
-    tok->starting_col_offset = -1;
-    blankline = 0;
-
-
-    /* Get indentation level */
-    if (tok->atbol) {
-        int col = 0;
-        int altcol = 0;
-        tok->atbol = 0;
-        int cont_line_col = 0;
-        for (;;) {
-            c = tok_nextc(tok);
-            if (c == ' ') {
-                col++, altcol++;
-            }
-            else if (c == '\t') {
-                col = (col / tok->tabsize + 1) * tok->tabsize;
-                altcol = (altcol / ALTTABSIZE + 1) * ALTTABSIZE;
-            }
-            else if (c == '\014')  {/* Control-L (formfeed) */
-                col = altcol = 0; /* For Emacs users */
-            }
-            else if (c == '\\') {
-                // Indentation cannot be split over multiple physical lines
-                // using backslashes. This means that if we found a backslash
-                // preceded by whitespace, **the first one we find** determines
-                // the level of indentation of whatever comes next.
-                cont_line_col = cont_line_col ? cont_line_col : col;
-                if ((c = tok_continuation_line(tok)) == -1) {
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-            else {
-                break;
-            }
-        }
-        tok_backup(tok, c);
-        if (c == '#' || c == '\n' || c == '\r') {
-            /* Lines with only whitespace and/or comments
-               shouldn't affect the indentation and are
-               not passed to the parser as NEWLINE tokens,
-               except *totally* empty lines in interactive
-               mode, which signal the end of a command group. */
-            if (col == 0 && c == '\n' && tok->prompt != NULL) {
-                blankline = 0; /* Let it through */
-            }
-            else if (tok->prompt != NULL && tok->lineno == 1) {
-                /* In interactive mode, if the first line contains
-                   only spaces and/or a comment, let it through. */
-                blankline = 0;
-                col = altcol = 0;
-            }
-            else {
-                blankline = 1; /* Ignore completely */
-            }
-            /* We can't jump back right here since we still
-               may need to skip to the end of a comment */
-        }
-        if (!blankline && tok->level == 0) {
-            col = cont_line_col ? cont_line_col : col;
-            altcol = cont_line_col ? cont_line_col : altcol;
-            if (col == tok->indstack[tok->indent]) {
-                /* No change */
-                if (altcol != tok->altindstack[tok->indent]) {
-                    return MAKE_TOKEN(indenterror(tok));
-                }
-            }
-            else if (col > tok->indstack[tok->indent]) {
-                /* Indent -- always one */
-                if (tok->indent+1 >= MAXINDENT) {
-                    tok->done = E_TOODEEP;
-                    tok->cur = tok->inp;
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-                if (altcol <= tok->altindstack[tok->indent]) {
-                    return MAKE_TOKEN(indenterror(tok));
-                }
-                tok->pendin++;
-                tok->indstack[++tok->indent] = col;
-                tok->altindstack[tok->indent] = altcol;
-            }
-            else /* col < tok->indstack[tok->indent] */ {
-                /* Dedent -- any number, must be consistent */
-                while (tok->indent > 0 &&
-                    col < tok->indstack[tok->indent]) {
-                    tok->pendin--;
-                    tok->indent--;
-                }
-                if (col != tok->indstack[tok->indent]) {
-                    tok->done = E_DEDENT;
-                    tok->cur = tok->inp;
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-                if (altcol != tok->altindstack[tok->indent]) {
-                    return MAKE_TOKEN(indenterror(tok));
-                }
-            }
-        }
-    }
-
-    tok->start = tok->cur;
-    tok->starting_col_offset = tok->col_offset;
-
-    /* Return pending indents/dedents */
-    if (tok->pendin != 0) {
-        if (tok->pendin < 0) {
-            if (tok->tok_extra_tokens) {
-                p_start = tok->cur;
-                p_end = tok->cur;
-            }
-            tok->pendin++;
-            return MAKE_TOKEN(DEDENT);
-        }
-        else {
-            if (tok->tok_extra_tokens) {
-                p_start = tok->buf;
-                p_end = tok->cur;
-            }
-            tok->pendin--;
-            return MAKE_TOKEN(INDENT);
-        }
-    }
-
-    /* Peek ahead at the next character */
-    c = tok_nextc(tok);
-    tok_backup(tok, c);
-
- again:
-    tok->start = NULL;
-    /* Skip spaces */
-    do {
-        c = tok_nextc(tok);
-    } while (c == ' ' || c == '\t' || c == '\014');
-
-    /* Set start of current token */
-    tok->start = tok->cur == NULL ? NULL : tok->cur - 1;
-    tok->starting_col_offset = tok->col_offset - 1;
-
-    /* Skip comment, unless it's a type comment */
-    if (c == '#') {
-
-        const char* p = NULL;
-        const char *prefix, *type_start;
-        int current_starting_col_offset;
-
-        while (c != EOF && c != '\n' && c != '\r') {
-            c = tok_nextc(tok);
-        }
-
-        if (tok->tok_extra_tokens) {
-            p = tok->start;
-        }
-
-        if (tok->type_comments) {
-            p = tok->start;
-            current_starting_col_offset = tok->starting_col_offset;
-            prefix = type_comment_prefix;
-            while (*prefix && p < tok->cur) {
-                if (*prefix == ' ') {
-                    while (*p == ' ' || *p == '\t') {
-                        p++;
-                        current_starting_col_offset++;
-                    }
-                } else if (*prefix == *p) {
-                    p++;
-                    current_starting_col_offset++;
-                } else {
-                    break;
-                }
-
-                prefix++;
-            }
-
-            /* This is a type comment if we matched all of type_comment_prefix. */
-            if (!*prefix) {
-                int is_type_ignore = 1;
-                // +6 in order to skip the word 'ignore'
-                const char *ignore_end = p + 6;
-                const int ignore_end_col_offset = current_starting_col_offset + 6;
-                tok_backup(tok, c);  /* don't eat the newline or EOF */
-
-                type_start = p;
-
-                /* A TYPE_IGNORE is "type: ignore" followed by the end of the token
-                 * or anything ASCII and non-alphanumeric. */
-                is_type_ignore = (
-                    tok->cur >= ignore_end && memcmp(p, "ignore", 6) == 0
-                    && !(tok->cur > ignore_end
-                         && ((unsigned char)ignore_end[0] >= 128 || Py_ISALNUM(ignore_end[0]))));
-
-                if (is_type_ignore) {
-                    p_start = ignore_end;
-                    p_end = tok->cur;
-
-                    /* If this type ignore is the only thing on the line, consume the newline also. */
-                    if (blankline) {
-                        tok_nextc(tok);
-                        tok->atbol = 1;
-                    }
-                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_IGNORE, ignore_end_col_offset, tok->col_offset);
-                } else {
-                    p_start = type_start;
-                    p_end = tok->cur;
-                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_COMMENT, current_starting_col_offset, tok->col_offset);
-                }
-            }
-        }
-        if (tok->tok_extra_tokens) {
-            tok_backup(tok, c);  /* don't eat the newline or EOF */
-            p_start = p;
-            p_end = tok->cur;
-            tok->comment_newline = blankline;
-            return MAKE_TOKEN(COMMENT);
-        }
-    }
-
-    if (tok->done == E_INTERACT_STOP) {
-        return MAKE_TOKEN(ENDMARKER);
-    }
-
-    /* Check for EOF and errors now */
-    if (c == EOF) {
-        if (tok->level) {
-            return MAKE_TOKEN(ERRORTOKEN);
-        }
-        return MAKE_TOKEN(tok->done == E_EOF ? ENDMARKER : ERRORTOKEN);
-    }
-
-    /* Identifier (most frequent token!) */
-    nonascii = 0;
-    if (is_potential_identifier_start(c)) {
-        /* Process the various legal combinations of b"", r"", u"", and f"". */
-        int saw_b = 0, saw_r = 0, saw_u = 0, saw_f = 0;
-        while (1) {
-            if (!(saw_b || saw_u || saw_f) && (c == 'b' || c == 'B'))
-                saw_b = 1;
-            /* Since this literal is supported only for backwards
-               compatibility, we don't want to allow it in arbitrary order
-               the way we do for byte literals. */
-            else if (!(saw_b || saw_u || saw_r || saw_f)
-                     && (c == 'u'|| c == 'U')) {
-                saw_u = 1;
-            }
-            /* ur"" and ru"" are not supported */
-            else if (!(saw_r || saw_u) && (c == 'r' || c == 'R')) {
-                saw_r = 1;
-            }
-            else if (!(saw_f || saw_b || saw_u) && (c == 'f' || c == 'F')) {
-                saw_f = 1;
-            }
-            else {
-                break;
-            }
-            c = tok_nextc(tok);
-            if (c == '"' || c == '\'') {
-                if (saw_f) {
-                    goto f_string_quote;
-                }
-                goto letter_quote;
-            }
-        }
-        while (is_potential_identifier_char(c)) {
-            if (c >= 128) {
-                nonascii = 1;
-            }
-            c = tok_nextc(tok);
-        }
-        tok_backup(tok, c);
-        if (nonascii && !verify_identifier(tok)) {
-            return MAKE_TOKEN(ERRORTOKEN);
-        }
-
-        p_start = tok->start;
-        p_end = tok->cur;
-
-        return MAKE_TOKEN(NAME);
-    }
-
-    if (c == '\r') {
-        c = tok_nextc(tok);
-    }
-
-    /* Newline */
-    if (c == '\n') {
-        tok->atbol = 1;
-        if (blankline || tok->level > 0) {
-            if (tok->tok_extra_tokens) {
-                if (tok->comment_newline) {
-                    tok->comment_newline = 0;
-                }
-                p_start = tok->start;
-                p_end = tok->cur;
-                return MAKE_TOKEN(NL);
-            }
-            goto nextline;
-        }
-        if (tok->comment_newline && tok->tok_extra_tokens) {
-            tok->comment_newline = 0;
-            p_start = tok->start;
-            p_end = tok->cur;
-            return MAKE_TOKEN(NL);
-        }
-        p_start = tok->start;
-        p_end = tok->cur - 1; /* Leave '\n' out of the string */
-        tok->cont_line = 0;
-        return MAKE_TOKEN(NEWLINE);
-    }
-
-    /* Period or number starting with period? */
-    if (c == '.') {
-        c = tok_nextc(tok);
-        if (Py_ISDIGIT(c)) {
-            goto fraction;
-        } else if (c == '.') {
-            c = tok_nextc(tok);
-            if (c == '.') {
-                p_start = tok->start;
-                p_end = tok->cur;
-                return MAKE_TOKEN(ELLIPSIS);
-            }
-            else {
-                tok_backup(tok, c);
-            }
-            tok_backup(tok, '.');
-        }
-        else {
-            tok_backup(tok, c);
-        }
-        p_start = tok->start;
-        p_end = tok->cur;
-        return MAKE_TOKEN(DOT);
-    }
-
-    /* Number */
-    if (Py_ISDIGIT(c)) {
-        if (c == '0') {
-            /* Hex, octal or binary -- maybe. */
-            c = tok_nextc(tok);
-            if (c == 'x' || c == 'X') {
-                /* Hex */
-                c = tok_nextc(tok);
-                do {
-                    if (c == '_') {
-                        c = tok_nextc(tok);
-                    }
-                    if (!Py_ISXDIGIT(c)) {
-                        tok_backup(tok, c);
-                        return MAKE_TOKEN(syntaxerror(tok, "invalid hexadecimal literal"));
-                    }
-                    do {
-                        c = tok_nextc(tok);
-                    } while (Py_ISXDIGIT(c));
-                } while (c == '_');
-                if (!verify_end_of_number(tok, c, "hexadecimal")) {
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-            else if (c == 'o' || c == 'O') {
-                /* Octal */
-                c = tok_nextc(tok);
-                do {
-                    if (c == '_') {
-                        c = tok_nextc(tok);
-                    }
-                    if (c < '0' || c >= '8') {
-                        if (Py_ISDIGIT(c)) {
-                            return MAKE_TOKEN(syntaxerror(tok,
-                                    "invalid digit '%c' in octal literal", c));
-                        }
-                        else {
-                            tok_backup(tok, c);
-                            return MAKE_TOKEN(syntaxerror(tok, "invalid octal literal"));
-                        }
-                    }
-                    do {
-                        c = tok_nextc(tok);
-                    } while ('0' <= c && c < '8');
-                } while (c == '_');
-                if (Py_ISDIGIT(c)) {
-                    return MAKE_TOKEN(syntaxerror(tok,
-                            "invalid digit '%c' in octal literal", c));
-                }
-                if (!verify_end_of_number(tok, c, "octal")) {
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-            else if (c == 'b' || c == 'B') {
-                /* Binary */
-                c = tok_nextc(tok);
-                do {
-                    if (c == '_') {
-                        c = tok_nextc(tok);
-                    }
-                    if (c != '0' && c != '1') {
-                        if (Py_ISDIGIT(c)) {
-                            return MAKE_TOKEN(syntaxerror(tok, "invalid digit '%c' in binary literal", c));
-                        }
-                        else {
-                            tok_backup(tok, c);
-                            return MAKE_TOKEN(syntaxerror(tok, "invalid binary literal"));
-                        }
-                    }
-                    do {
-                        c = tok_nextc(tok);
-                    } while (c == '0' || c == '1');
-                } while (c == '_');
-                if (Py_ISDIGIT(c)) {
-                    return MAKE_TOKEN(syntaxerror(tok, "invalid digit '%c' in binary literal", c));
-                }
-                if (!verify_end_of_number(tok, c, "binary")) {
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-            else {
-                int nonzero = 0;
-                /* maybe old-style octal; c is first char of it */
-                /* in any case, allow '0' as a literal */
-                while (1) {
-                    if (c == '_') {
-                        c = tok_nextc(tok);
-                        if (!Py_ISDIGIT(c)) {
-                            tok_backup(tok, c);
-                            return MAKE_TOKEN(syntaxerror(tok, "invalid decimal literal"));
-                        }
-                    }
-                    if (c != '0') {
-                        break;
-                    }
-                    c = tok_nextc(tok);
-                }
-                char* zeros_end = tok->cur;
-                if (Py_ISDIGIT(c)) {
-                    nonzero = 1;
-                    c = tok_decimal_tail(tok);
-                    if (c == 0) {
-                        return MAKE_TOKEN(ERRORTOKEN);
-                    }
-                }
-                if (c == '.') {
-                    c = tok_nextc(tok);
-                    goto fraction;
-                }
-                else if (c == 'e' || c == 'E') {
-                    goto exponent;
-                }
-                else if (c == 'j' || c == 'J') {
-                    goto imaginary;
-                }
-                else if (nonzero && !tok->tok_extra_tokens) {
-                    /* Old-style octal: now disallowed. */
-                    tok_backup(tok, c);
-                    return MAKE_TOKEN(syntaxerror_known_range(
-                            tok, (int)(tok->start + 1 - tok->line_start),
-                            (int)(zeros_end - tok->line_start),
-                            "leading zeros in decimal integer "
-                            "literals are not permitted; "
-                            "use an 0o prefix for octal integers"));
-                }
-                if (!verify_end_of_number(tok, c, "decimal")) {
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-        }
-        else {
-            /* Decimal */
-            c = tok_decimal_tail(tok);
-            if (c == 0) {
-                return MAKE_TOKEN(ERRORTOKEN);
-            }
-            {
-                /* Accept floating point numbers. */
-                if (c == '.') {
-                    c = tok_nextc(tok);
-        fraction:
-                    /* Fraction */
-                    if (Py_ISDIGIT(c)) {
-                        c = tok_decimal_tail(tok);
-                        if (c == 0) {
-                            return MAKE_TOKEN(ERRORTOKEN);
-                        }
-                    }
-                }
-                if (c == 'e' || c == 'E') {
-                    int e;
-                  exponent:
-                    e = c;
-                    /* Exponent part */
-                    c = tok_nextc(tok);
-                    if (c == '+' || c == '-') {
-                        c = tok_nextc(tok);
-                        if (!Py_ISDIGIT(c)) {
-                            tok_backup(tok, c);
-                            return MAKE_TOKEN(syntaxerror(tok, "invalid decimal literal"));
-                        }
-                    } else if (!Py_ISDIGIT(c)) {
-                        tok_backup(tok, c);
-                        if (!verify_end_of_number(tok, e, "decimal")) {
-                            return MAKE_TOKEN(ERRORTOKEN);
-                        }
-                        tok_backup(tok, e);
-                        p_start = tok->start;
-                        p_end = tok->cur;
-                        return MAKE_TOKEN(NUMBER);
-                    }
-                    c = tok_decimal_tail(tok);
-                    if (c == 0) {
-                        return MAKE_TOKEN(ERRORTOKEN);
-                    }
-                }
-                if (c == 'j' || c == 'J') {
-                    /* Imaginary part */
-        imaginary:
-                    c = tok_nextc(tok);
-                    if (!verify_end_of_number(tok, c, "imaginary")) {
-                        return MAKE_TOKEN(ERRORTOKEN);
-                    }
-                }
-                else if (!verify_end_of_number(tok, c, "decimal")) {
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-        }
-        tok_backup(tok, c);
-        p_start = tok->start;
-        p_end = tok->cur;
-        return MAKE_TOKEN(NUMBER);
-    }
-
-  f_string_quote:
-    if (((Py_TOLOWER(*tok->start) == 'f' || Py_TOLOWER(*tok->start) == 'r') && (c == '\'' || c == '"'))) {
-        int quote = c;
-        int quote_size = 1;             /* 1 or 3 */
-
-        /* Nodes of type STRING, especially multi line strings
-           must be handled differently in order to get both
-           the starting line number and the column offset right.
-           (cf. issue 16806) */
-        tok->first_lineno = tok->lineno;
-        tok->multi_line_start = tok->line_start;
-
-        /* Find the quote size and start of string */
-        int after_quote = tok_nextc(tok);
-        if (after_quote == quote) {
-            int after_after_quote = tok_nextc(tok);
-            if (after_after_quote == quote) {
-                quote_size = 3;
-            }
-            else {
-                // TODO: Check this
-                tok_backup(tok, after_after_quote);
-                tok_backup(tok, after_quote);
-            }
-        }
-        if (after_quote != quote) {
-            tok_backup(tok, after_quote);
-        }
-
-
-        p_start = tok->start;
-        p_end = tok->cur;
-        if (tok->tok_mode_stack_index + 1 >= MAXFSTRINGLEVEL) {
-            return MAKE_TOKEN(syntaxerror(tok, "too many nested f-strings"));
-        }
-        tokenizer_mode *the_current_tok = TOK_NEXT_MODE(tok);
-        the_current_tok->kind = TOK_FSTRING_MODE;
-        the_current_tok->f_string_quote = quote;
-        the_current_tok->f_string_quote_size = quote_size;
-        the_current_tok->f_string_start = tok->start;
-        the_current_tok->f_string_multi_line_start = tok->line_start;
-        the_current_tok->f_string_line_start = tok->lineno;
-        the_current_tok->f_string_start_offset = -1;
-        the_current_tok->f_string_multi_line_start_offset = -1;
-        the_current_tok->last_expr_buffer = NULL;
-        the_current_tok->last_expr_size = 0;
-        the_current_tok->last_expr_end = -1;
-        the_current_tok->f_string_debug = 0;
-
-        switch (*tok->start) {
-            case 'F':
-            case 'f':
-                the_current_tok->f_string_raw = Py_TOLOWER(*(tok->start + 1)) == 'r';
-                break;
-            case 'R':
-            case 'r':
-                the_current_tok->f_string_raw = 1;
-                break;
-            default:
-                Py_UNREACHABLE();
-        }
-
-        the_current_tok->curly_bracket_depth = 0;
-        the_current_tok->curly_bracket_expr_start_depth = -1;
-        return MAKE_TOKEN(FSTRING_START);
-    }
-
-  letter_quote:
-    /* String */
-    if (c == '\'' || c == '"') {
-        int quote = c;
-        int quote_size = 1;             /* 1 or 3 */
-        int end_quote_size = 0;
-
-        /* Nodes of type STRING, especially multi line strings
-           must be handled differently in order to get both
-           the starting line number and the column offset right.
-           (cf. issue 16806) */
-        tok->first_lineno = tok->lineno;
-        tok->multi_line_start = tok->line_start;
-
-        /* Find the quote size and start of string */
-        c = tok_nextc(tok);
-        if (c == quote) {
-            c = tok_nextc(tok);
-            if (c == quote) {
-                quote_size = 3;
-            }
-            else {
-                end_quote_size = 1;     /* empty string found */
-            }
-        }
-        if (c != quote) {
-            tok_backup(tok, c);
-        }
-
-        /* Get rest of string */
-        while (end_quote_size != quote_size) {
-            c = tok_nextc(tok);
-            if (tok->done == E_ERROR) {
-                return MAKE_TOKEN(ERRORTOKEN);
-            }
-            if (tok->done == E_DECODE) {
-                break;
-            }
-            if (c == EOF || (quote_size == 1 && c == '\n')) {
-                assert(tok->multi_line_start != NULL);
-                // shift the tok_state's location into
-                // the start of string, and report the error
-                // from the initial quote character
-                tok->cur = (char *)tok->start;
-                tok->cur++;
-                tok->line_start = tok->multi_line_start;
-                int start = tok->lineno;
-                tok->lineno = tok->first_lineno;
-
-                if (INSIDE_FSTRING(tok)) {
-                    /* When we are in an f-string, before raising the
-                     * unterminated string literal error, check whether the
-                     * initial quote matches the f-string's quotes; if it
-                     * does, this must be a missing '}' token, so raise the
-                     * proper error */
-                    tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
-                    if (the_current_tok->f_string_quote == quote &&
-                        the_current_tok->f_string_quote_size == quote_size) {
-                        return MAKE_TOKEN(syntaxerror(tok, "f-string: expecting '}'", start));
-                    }
-                }
-
-                if (quote_size == 3) {
-                    syntaxerror(tok, "unterminated triple-quoted string literal"
-                                     " (detected at line %d)", start);
-                    if (c != '\n') {
-                        tok->done = E_EOFS;
-                    }
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-                else {
-                    syntaxerror(tok, "unterminated string literal (detected at"
-                                     " line %d)", start);
-                    if (c != '\n') {
-                        tok->done = E_EOLS;
-                    }
-                    return MAKE_TOKEN(ERRORTOKEN);
-                }
-            }
-            if (c == quote) {
-                end_quote_size += 1;
-            }
-            else {
-                end_quote_size = 0;
-                if (c == '\\') {
-                    c = tok_nextc(tok);  /* skip escaped char */
-                    if (c == '\r') {
-                        c = tok_nextc(tok);
-                    }
-                }
-            }
-        }
-
-        p_start = tok->start;
-        p_end = tok->cur;
-        return MAKE_TOKEN(STRING);
-    }
-
-    /* Line continuation */
-    if (c == '\\') {
-        if ((c = tok_continuation_line(tok)) == -1) {
-            return MAKE_TOKEN(ERRORTOKEN);
-        }
-        tok->cont_line = 1;
-        goto again; /* Read next line */
-    }
-
-    /* Punctuation character */
-    int is_punctuation = (c == ':' || c == '}' || c == '!' || c == '{');
-    if (is_punctuation && INSIDE_FSTRING(tok) && INSIDE_FSTRING_EXPR(current_tok)) {
-        /* This code block gets executed before the curly_bracket_depth is
-         * incremented by the `{` case, so to check that we are at the 0th
-         * level, we need to adjust it manually */
-        int cursor = current_tok->curly_bracket_depth - (c != '{');
-        if (cursor == 0 && !update_fstring_expr(tok, c)) {
-            return MAKE_TOKEN(ENDMARKER);
-        }
-        if (cursor == 0 && c != '{' && set_fstring_expr(tok, token, c)) {
-            return MAKE_TOKEN(ERRORTOKEN);
-        }
-
-        if (c == ':' && cursor == current_tok->curly_bracket_expr_start_depth) {
-            current_tok->kind = TOK_FSTRING_MODE;
-            p_start = tok->start;
-            p_end = tok->cur;
-            return MAKE_TOKEN(_PyToken_OneChar(c));
-        }
-    }
-
-    /* Check for two-character token */
-    {
-        int c2 = tok_nextc(tok);
-        int current_token = _PyToken_TwoChars(c, c2);
-        if (current_token != OP) {
-            int c3 = tok_nextc(tok);
-            int current_token3 = _PyToken_ThreeChars(c, c2, c3);
-            if (current_token3 != OP) {
-                current_token = current_token3;
-            }
-            else {
-                tok_backup(tok, c3);
-            }
-            p_start = tok->start;
-            p_end = tok->cur;
-            return MAKE_TOKEN(current_token);
-        }
-        tok_backup(tok, c2);
-    }
-
-    /* Keep track of parentheses nesting level */
-    switch (c) {
-    case '(':
-    case '[':
-    case '{':
-        if (tok->level >= MAXLEVEL) {
-            return MAKE_TOKEN(syntaxerror(tok, "too many nested parentheses"));
-        }
-        tok->parenstack[tok->level] = c;
-        tok->parenlinenostack[tok->level] = tok->lineno;
-        tok->parencolstack[tok->level] = (int)(tok->start - tok->line_start);
-        tok->level++;
-        if (INSIDE_FSTRING(tok)) {
-            current_tok->curly_bracket_depth++;
-        }
-        break;
-    case ')':
-    case ']':
-    case '}':
-        if (INSIDE_FSTRING(tok) && !current_tok->curly_bracket_depth && c == '}') {
-            return MAKE_TOKEN(syntaxerror(tok, "f-string: single '}' is not allowed"));
-        }
-        if (!tok->tok_extra_tokens && !tok->level) {
-            return MAKE_TOKEN(syntaxerror(tok, "unmatched '%c'", c));
-        }
-        if (tok->level > 0) {
-            tok->level--;
-            int opening = tok->parenstack[tok->level];
-            if (!tok->tok_extra_tokens && !((opening == '(' && c == ')') ||
-                                            (opening == '[' && c == ']') ||
-                                            (opening == '{' && c == '}'))) {
-                /* If the opening bracket belongs to an f-string's expression
-                part (e.g. f"{)}") and the closing bracket is an arbitrary
-                nested expression, then instead of matching it with a
-                different syntactical construct, we throw an unmatched
-                parentheses error. */
-                if (INSIDE_FSTRING(tok) && opening == '{') {
-                    assert(current_tok->curly_bracket_depth >= 0);
-                    int previous_bracket = current_tok->curly_bracket_depth - 1;
-                    if (previous_bracket == current_tok->curly_bracket_expr_start_depth) {
-                        return MAKE_TOKEN(syntaxerror(tok, "f-string: unmatched '%c'", c));
-                    }
-                }
-                if (tok->parenlinenostack[tok->level] != tok->lineno) {
-                    return MAKE_TOKEN(syntaxerror(tok,
-                            "closing parenthesis '%c' does not match "
-                            "opening parenthesis '%c' on line %d",
-                            c, opening, tok->parenlinenostack[tok->level]));
-                }
-                else {
-                    return MAKE_TOKEN(syntaxerror(tok,
-                            "closing parenthesis '%c' does not match "
-                            "opening parenthesis '%c'",
-                            c, opening));
-                }
-            }
-        }
-
-        if (INSIDE_FSTRING(tok)) {
-            current_tok->curly_bracket_depth--;
-            if (c == '}' && current_tok->curly_bracket_depth == current_tok->curly_bracket_expr_start_depth) {
-                current_tok->curly_bracket_expr_start_depth--;
-                current_tok->kind = TOK_FSTRING_MODE;
-                current_tok->f_string_debug = 0;
-            }
-        }
-        break;
-    default:
-        break;
-    }
-
-    if (!Py_UNICODE_ISPRINTABLE(c)) {
-        return MAKE_TOKEN(syntaxerror(tok, "invalid non-printable character U+%04X", c));
-    }
-
-    if (c == '=' && INSIDE_FSTRING_EXPR(current_tok)) {
-        current_tok->f_string_debug = 1;
-    }
-
-    /* Punctuation character */
-    p_start = tok->start;
-    p_end = tok->cur;
-    return MAKE_TOKEN(_PyToken_OneChar(c));
-}
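
The indentation prologue of tok_get_normal_mode() measures each line twice: col uses the real tab size (tok->tabsize, default 8) while altcol uses ALTTABSIZE (1), and a mismatch between the two indent stacks is what triggers the tabs/spaces inconsistency error. A standalone sketch of the two-width arithmetic (not CPython code; widths hard-coded):

#include <stdio.h>

#define TABSIZE    8
#define ALTTABSIZE 1

int main(void) {
    int col = 0, altcol = 0;
    const char *indent = " \t ";            /* space, tab, space */
    for (const char *p = indent; *p != '\0'; p++) {
        if (*p == ' ') {
            col++;
            altcol++;
        }
        else if (*p == '\t') {
            /* advance to the next tab stop under each width */
            col = (col / TABSIZE + 1) * TABSIZE;
            altcol = (altcol / ALTTABSIZE + 1) * ALTTABSIZE;
        }
    }
    printf("col=%d altcol=%d\n", col, altcol);   /* col=9 altcol=3 */
    return 0;
}
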
-
-static int
-tok_get_fstring_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
-{
-    const char *p_start = NULL;
-    const char *p_end = NULL;
-    int end_quote_size = 0;
-    int unicode_escape = 0;
-
-    tok->start = tok->cur;
-    tok->first_lineno = tok->lineno;
-    tok->starting_col_offset = tok->col_offset;
-
-    // If we start with a bracket, we defer to the normal mode as there is nothing for us to tokenize
-    // before it.
-    int start_char = tok_nextc(tok);
-    if (start_char == '{') {
-        int peek1 = tok_nextc(tok);
-        tok_backup(tok, peek1);
-        tok_backup(tok, start_char);
-        if (peek1 != '{') {
-            current_tok->curly_bracket_expr_start_depth++;
-            if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
-                return MAKE_TOKEN(syntaxerror(tok, "f-string: expressions nested too deeply"));
-            }
-            TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
-            return tok_get_normal_mode(tok, current_tok, token);
-        }
-    }
-    else {
-        tok_backup(tok, start_char);
-    }
-
-    // Check if we are at the end of the string
-    for (int i = 0; i < current_tok->f_string_quote_size; i++) {
-        int quote = tok_nextc(tok);
-        if (quote != current_tok->f_string_quote) {
-            tok_backup(tok, quote);
-            goto f_string_middle;
-        }
-    }
-
-    if (current_tok->last_expr_buffer != NULL) {
-        PyMem_Free(current_tok->last_expr_buffer);
-        current_tok->last_expr_buffer = NULL;
-        current_tok->last_expr_size = 0;
-        current_tok->last_expr_end = -1;
-    }
-
-    p_start = tok->start;
-    p_end = tok->cur;
-    tok->tok_mode_stack_index--;
-    return MAKE_TOKEN(FSTRING_END);
-
-f_string_middle:
-
-    // TODO: This is a bit of a hack, but it works for now. We need to find a better way to handle
-    // this.
-    tok->multi_line_start = tok->line_start;
-    while (end_quote_size != current_tok->f_string_quote_size) {
-        int c = tok_nextc(tok);
-        if (tok->done == E_ERROR) {
-            return MAKE_TOKEN(ERRORTOKEN);
-        }
-        int in_format_spec = (current_tok->last_expr_end != -1
-                              && INSIDE_FSTRING_EXPR(current_tok));
-
-        if (c == EOF || (current_tok->f_string_quote_size == 1 && c == '\n')) {
-            if (tok->decoding_erred) {
-                return MAKE_TOKEN(ERRORTOKEN);
-            }
-
-            // If we are in a format spec and we found a newline,
-            // it means that the format spec ends here and we should
-            // return to the regular mode.
-            if (in_format_spec && c == '\n') {
-                tok_backup(tok, c);
-                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
-                p_start = tok->start;
-                p_end = tok->cur;
-                return MAKE_TOKEN(FSTRING_MIDDLE);
-            }
-
-            assert(tok->multi_line_start != NULL);
-            // shift the tok_state's location into
-            // the start of string, and report the error
-            // from the initial quote character
-            tok->cur = (char *)current_tok->f_string_start;
-            tok->cur++;
-            tok->line_start = current_tok->f_string_multi_line_start;
-            int start = tok->lineno;
-
-            tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
-            tok->lineno = the_current_tok->f_string_line_start;
-
-            if (current_tok->f_string_quote_size == 3) {
-                return MAKE_TOKEN(syntaxerror(tok,
-                                    "unterminated triple-quoted f-string literal"
-                                    " (detected at line %d)", start));
-            }
-            else {
-                return MAKE_TOKEN(syntaxerror(tok,
-                                    "unterminated f-string literal (detected at"
-                                    " line %d)", start));
-            }
-        }
-
-        if (c == current_tok->f_string_quote) {
-            end_quote_size += 1;
-            continue;
-        } else {
-            end_quote_size = 0;
-        }
-
-        if (c == '{') {
-            int peek = tok_nextc(tok);
-            if (peek != '{' || in_format_spec) {
-                tok_backup(tok, peek);
-                tok_backup(tok, c);
-                current_tok->curly_bracket_expr_start_depth++;
-                if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
-                    return MAKE_TOKEN(syntaxerror(tok, "f-string: expressions nested too deeply"));
-                }
-                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
-                p_start = tok->start;
-                p_end = tok->cur;
-            } else {
-                p_start = tok->start;
-                p_end = tok->cur - 1;
-            }
-            return MAKE_TOKEN(FSTRING_MIDDLE);
-        } else if (c == '}') {
-            if (unicode_escape) {
-                p_start = tok->start;
-                p_end = tok->cur;
-                return MAKE_TOKEN(FSTRING_MIDDLE);
-            }
-            int peek = tok_nextc(tok);
-
-            // The tokenizer can only be in the format spec if we have already completed the expression
-            // scanning (indicated by the end of the expression being set) and we are not at the top level
-            // of the bracket stack (-1 is the top level). Since format specifiers can't legally use double
-            // brackets, we can bypass it here.
-            if (peek == '}' && !in_format_spec) {
-                p_start = tok->start;
-                p_end = tok->cur - 1;
-            } else {
-                tok_backup(tok, peek);
-                tok_backup(tok, c);
-                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
-                p_start = tok->start;
-                p_end = tok->cur;
-            }
-            return MAKE_TOKEN(FSTRING_MIDDLE);
-        } else if (c == '\\') {
-            int peek = tok_nextc(tok);
-            if (peek == '\r') {
-                peek = tok_nextc(tok);
-            }
-            // Special case when the backslash is right before a curly
-            // brace: restore the character and return control back to
-            // the loop for the next iteration.
-            if (peek == '{' || peek == '}') {
-                if (!current_tok->f_string_raw) {
-                    if (warn_invalid_escape_sequence(tok, peek)) {
-                        return MAKE_TOKEN(ERRORTOKEN);
-                    }
-                }
-                tok_backup(tok, peek);
-                continue;
-            }
-
-            if (!current_tok->f_string_raw) {
-                if (peek == 'N') {
-                    /* Handle named unicode escapes (\N{BULLET}) */
-                    peek = tok_nextc(tok);
-                    if (peek == '{') {
-                        unicode_escape = 1;
-                    } else {
-                        tok_backup(tok, peek);
-                    }
-                }
-            } /* else {
-                skip the escaped character
-            }*/
-        }
-    }
-
-    // Backup the f-string quotes to emit a final FSTRING_MIDDLE and
-    // add the quotes to the FSTRING_END in the next tokenizer iteration.
-    for (int i = 0; i < current_tok->f_string_quote_size; i++) {
-        tok_backup(tok, current_tok->f_string_quote);
-    }
-    p_start = tok->start;
-    p_end = tok->cur;
-    return MAKE_TOKEN(FSTRING_MIDDLE);
-}
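
The FSTRING_START / FSTRING_MIDDLE / FSTRING_END mode switching above is observable from Python through the tokenize module on 3.12+ builds. A hypothetical embedding sketch that prints the token stream for a small f-string (assumes a regular CPython build linked against libpython):

#include <Python.h>

int main(void) {
    Py_Initialize();
    /* Tokenize f"a{x:>2}b" and print each token's name and text;
       expect FSTRING_START, FSTRING_MIDDLE, OP, NAME, ... FSTRING_END. */
    PyRun_SimpleString(
        "import io, tokenize\n"
        "src = 'f\"a{x:>2}b\"'\n"
        "for t in tokenize.generate_tokens(io.StringIO(src).readline):\n"
        "    print(tokenize.tok_name[t.type], repr(t.string))\n");
    return Py_FinalizeEx() < 0 ? 1 : 0;
}
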
-
-
-static int
-tok_get(struct tok_state *tok, struct token *token)
-{
-    tokenizer_mode *current_tok = TOK_GET_MODE(tok);
-    if (current_tok->kind == TOK_REGULAR_MODE) {
-        return tok_get_normal_mode(tok, current_tok, token);
-    } else {
-        return tok_get_fstring_mode(tok, current_tok, token);
-    }
-}
-
-int
-_PyTokenizer_Get(struct tok_state *tok, struct token *token)
-{
-    int result = tok_get(tok, token);
-    if (tok->decoding_erred) {
-        result = ERRORTOKEN;
-        tok->done = E_DECODE;
-    }
-    return result;
-}
-
-#if defined(__wasi__) || (defined(__EMSCRIPTEN__) && (__EMSCRIPTEN_major__ >= 3))
-// fdopen() with borrowed fd. WASI does not provide dup() and Emscripten's
-// dup() emulation with open() is slow.
-typedef union {
-    void *cookie;
-    int fd;
-} borrowed;
-
-static ssize_t
-borrow_read(void *cookie, char *buf, size_t size)
-{
-    borrowed b = {.cookie = cookie};
-    return read(b.fd, (void *)buf, size);
-}
-
-static FILE *
-fdopen_borrow(int fd) {
-    // supports only reading. seek fails. close and write are no-ops.
-    cookie_io_functions_t io_cb = {borrow_read, NULL, NULL, NULL};
-    borrowed b = {.fd = fd};
-    return fopencookie(b.cookie, "r", io_cb);
-}
-#else
-static FILE *
-fdopen_borrow(int fd) {
-    fd = _Py_dup(fd);
-    if (fd < 0) {
-        return NULL;
-    }
-    return fdopen(fd, "r");
-}
-#endif
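
The fallback branch is the portable idiom for "borrowing" a descriptor: duplicate it so that fclose() on the resulting FILE* cannot close the caller's fd (the fopencookie branch achieves the same by giving stdio no close callback at all). A standalone POSIX sketch of the fallback (not CPython code):

#include <stdio.h>
#include <unistd.h>

/* Duplicate fd so fclose() below cannot close the caller's fd. */
static FILE *borrow_fd(int fd) {
    int dup_fd = dup(fd);
    if (dup_fd < 0) {
        return NULL;
    }
    return fdopen(dup_fd, "r");
}

int main(void) {
    FILE *fp = borrow_fd(STDIN_FILENO);
    if (fp == NULL) {
        return 1;
    }
    char buf[128];
    if (fgets(buf, sizeof buf, fp) != NULL) {
        fputs(buf, stdout);      /* echo one line of stdin */
    }
    fclose(fp);                  /* closes only the duplicate */
    return 0;                    /* stdin itself is still open */
}
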
-
-/* Get the encoding of a Python file. Check for the coding cookie and check if
-   the file starts with a BOM.
-
-   _PyTokenizer_FindEncodingFilename() returns NULL when it can't find the
-   encoding in the first or second line of the file (in which case the encoding
-   should be assumed to be UTF-8).
-
-   The char* returned is malloc'ed via PyMem_Malloc() and thus must be freed
-   by the caller. */
-
-char *
-_PyTokenizer_FindEncodingFilename(int fd, PyObject *filename)
-{
-    struct tok_state *tok;
-    FILE *fp;
-    char *encoding = NULL;
-
-    fp = fdopen_borrow(fd);
-    if (fp == NULL) {
-        return NULL;
-    }
-    tok = _PyTokenizer_FromFile(fp, NULL, NULL, NULL);
-    if (tok == NULL) {
-        fclose(fp);
-        return NULL;
-    }
-    if (filename != NULL) {
-        tok->filename = Py_NewRef(filename);
-    }
-    else {
-        tok->filename = PyUnicode_FromString("<string>");
-        if (tok->filename == NULL) {
-            fclose(fp);
-            _PyTokenizer_Free(tok);
-            return encoding;
-        }
-    }
-    struct token token;
-    // We don't want to report warnings here because it could cause infinite recursion
-    // if fetching the encoding shows a warning.
-    tok->report_warnings = 0;
-    while (tok->lineno < 2 && tok->done == E_OK) {
-        _PyToken_Init(&token);
-        _PyTokenizer_Get(tok, &token);
-        _PyToken_Free(&token);
-    }
-    fclose(fp);
-    if (tok->encoding) {
-        encoding = (char *)PyMem_Malloc(strlen(tok->encoding) + 1);
-        if (encoding) {
-            strcpy(encoding, tok->encoding);
-        }
-    }
-    _PyTokenizer_Free(tok);
-    return encoding;
-}
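
Since _PyTokenizer_FindEncodingFilename is internal (leading underscore), the sketch below is hypothetical and would only link in-tree: it prints a file's declared encoding and falls back to UTF-8 when NULL comes back, per the contract described above.

#include <Python.h>
#include <fcntl.h>
#include <unistd.h>

extern char *_PyTokenizer_FindEncodingFilename(int fd, PyObject *filename);

int main(int argc, char *argv[]) {
    if (argc < 2) {
        return 2;
    }
    Py_Initialize();
    int fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        return 1;
    }
    char *enc = _PyTokenizer_FindEncodingFilename(fd, NULL);
    /* NULL means no cookie/BOM in the first two lines: assume UTF-8. */
    printf("%s\n", enc != NULL ? enc : "utf-8 (default)");
    PyMem_Free(enc);   /* PyMem_Free(NULL) is a no-op */
    close(fd);         /* the function only borrowed our fd */
    return Py_FinalizeEx() < 0 ? 1 : 0;
}
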
-
-#ifdef Py_DEBUG
-void
-tok_dump(int type, char *start, char *end)
-{
-    fprintf(stderr, "%s", _PyParser_TokenNames[type]);
-    if (type == NAME || type == NUMBER || type == STRING || type == OP)
-        fprintf(stderr, "(%.*s)", (int)(end - start), start);
-}
-#endif  // Py_DEBUG
diff --git a/Parser/tokenizer/file_tokenizer.c b/Parser/tokenizer/file_tokenizer.c
new file mode 100644 (file)
index 0000000..05ab5a0
--- /dev/null
@@ -0,0 +1,470 @@
+#ifdef HAVE_UNISTD_H
+#  include <unistd.h>             // read()
+#endif
+
+#include "Python.h"
+#include "pycore_call.h"
+#include "pycore_import.h"
+#include "pycore_fileutils.h"
+#include "errcode.h"
+
+#include "helpers.h"
+#include "../lexer/state.h"
+#include "../lexer/lexer.h"
+#include "../lexer/buffer.h"
+
+static int
+tok_concatenate_interactive_new_line(struct tok_state *tok, const char *line) {
+    assert(tok->fp_interactive);
+
+    if (!line) {
+        return 0;
+    }
+
+    Py_ssize_t current_size = tok->interactive_src_end - tok->interactive_src_start;
+    Py_ssize_t line_size = strlen(line);
+    char last_char = line[line_size > 0 ? line_size - 1 : line_size];
+    if (last_char != '\n') {
+        line_size += 1;
+    }
+    char* new_str = tok->interactive_src_start;
+
+    new_str = PyMem_Realloc(new_str, current_size + line_size + 1);
+    if (!new_str) {
+        if (tok->interactive_src_start) {
+            PyMem_Free(tok->interactive_src_start);
+        }
+        tok->interactive_src_start = NULL;
+        tok->interactive_src_end = NULL;
+        tok->done = E_NOMEM;
+        return -1;
+    }
+    strcpy(new_str + current_size, line);
+    tok->implicit_newline = 0;
+    if (last_char != '\n') {
+        /* Last line does not end in \n, fake one */
+        new_str[current_size + line_size - 1] = '\n';
+        new_str[current_size + line_size] = '\0';
+        tok->implicit_newline = 1;
+    }
+    tok->interactive_src_start = new_str;
+    tok->interactive_src_end = new_str + current_size + line_size;
+    return 0;
+}
+
+static int
+tok_readline_raw(struct tok_state *tok)
+{
+    do {
+        if (!_PyLexer_tok_reserve_buf(tok, BUFSIZ)) {
+            return 0;
+        }
+        int n_chars = (int)(tok->end - tok->inp);
+        size_t line_size = 0;
+        char *line = _Py_UniversalNewlineFgetsWithSize(tok->inp, n_chars, tok->fp, NULL, &line_size);
+        if (line == NULL) {
+            return 1;
+        }
+        if (tok->fp_interactive &&
+            tok_concatenate_interactive_new_line(tok, line) == -1) {
+            return 0;
+        }
+        tok->inp += line_size;
+        if (tok->inp == tok->buf) {
+            return 0;
+        }
+    } while (tok->inp[-1] != '\n');
+    return 1;
+}
+
+static int
+tok_readline_recode(struct tok_state *tok) {
+    PyObject *line;
+    const char *buf;
+    Py_ssize_t buflen;
+    line = tok->decoding_buffer;
+    if (line == NULL) {
+        line = PyObject_CallNoArgs(tok->decoding_readline);
+        if (line == NULL) {
+            _PyTokenizer_error_ret(tok);
+            goto error;
+        }
+    }
+    else {
+        tok->decoding_buffer = NULL;
+    }
+    buf = PyUnicode_AsUTF8AndSize(line, &buflen);
+    if (buf == NULL) {
+        _PyTokenizer_error_ret(tok);
+        goto error;
+    }
+    // Make room for the null terminator *and* potentially
+    // an extra newline character that we may need to artificially
+    // add.
+    size_t buffer_size = buflen + 2;
+    if (!_PyLexer_tok_reserve_buf(tok, buffer_size)) {
+        goto error;
+    }
+    memcpy(tok->inp, buf, buflen);
+    tok->inp += buflen;
+    *tok->inp = '\0';
+    if (tok->fp_interactive &&
+        tok_concatenate_interactive_new_line(tok, buf) == -1) {
+        goto error;
+    }
+    Py_DECREF(line);
+    return 1;
+error:
+    Py_XDECREF(line);
+    return 0;
+}
+
+/* Fetch the next byte from TOK. */
+static int fp_getc(struct tok_state *tok) {
+    return getc(tok->fp);
+}
+
+/* Unfetch the last byte back into TOK.  */
+static void fp_ungetc(int c, struct tok_state *tok) {
+    ungetc(c, tok->fp);
+}
+
+/* Set the readline function for TOK to a StreamReader's
+   readline function. The StreamReader decodes using the encoding ENC.
+
+   This function is called from _PyTokenizer_check_bom and _PyTokenizer_check_coding_spec.
+
+   ENC is usually identical to the future value of tok->encoding,
+   except for the (currently unsupported) case of UTF-16.
+
+   Return 1 on success, 0 on failure. */
+static int
+fp_setreadl(struct tok_state *tok, const char* enc)
+{
+    PyObject *readline, *open, *stream;
+    int fd;
+    long pos;
+
+    fd = fileno(tok->fp);
+    /* Due to buffering the file offset for fd can be different from the file
+     * position of tok->fp.  If tok->fp was opened in text mode on Windows,
+     * its file position counts CRLF as one char and can't be directly mapped
+     * to the file offset for fd.  Instead we step back one byte and read to
+     * the end of line.*/
+    pos = ftell(tok->fp);
+    if (pos == -1 ||
+        lseek(fd, (off_t)(pos > 0 ? pos - 1 : pos), SEEK_SET) == (off_t)-1) {
+        PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL);
+        return 0;
+    }
+
+    open = _PyImport_GetModuleAttrString("io", "open");
+    if (open == NULL) {
+        return 0;
+    }
+    stream = PyObject_CallFunction(open, "isisOOO",
+                    fd, "r", -1, enc, Py_None, Py_None, Py_False);
+    Py_DECREF(open);
+    if (stream == NULL) {
+        return 0;
+    }
+
+    readline = PyObject_GetAttr(stream, &_Py_ID(readline));
+    Py_DECREF(stream);
+    if (readline == NULL) {
+        return 0;
+    }
+    Py_XSETREF(tok->decoding_readline, readline);
+
+    if (pos > 0) {
+        PyObject *bufobj = _PyObject_CallNoArgs(readline);
+        if (bufobj == NULL) {
+            return 0;
+        }
+        Py_DECREF(bufobj);
+    }
+
+    return 1;
+}
+
+static int
+tok_underflow_interactive(struct tok_state *tok) {
+    if (tok->interactive_underflow == IUNDERFLOW_STOP) {
+        tok->done = E_INTERACT_STOP;
+        return 1;
+    }
+    char *newtok = PyOS_Readline(tok->fp ? tok->fp : stdin, stdout, tok->prompt);
+    if (newtok != NULL) {
+        char *translated = _PyTokenizer_translate_newlines(newtok, 0, 0, tok);
+        PyMem_Free(newtok);
+        if (translated == NULL) {
+            return 0;
+        }
+        newtok = translated;
+    }
+    if (tok->encoding && newtok && *newtok) {
+        /* Recode to UTF-8 */
+        Py_ssize_t buflen;
+        const char* buf;
+        PyObject *u = _PyTokenizer_translate_into_utf8(newtok, tok->encoding);
+        PyMem_Free(newtok);
+        if (u == NULL) {
+            tok->done = E_DECODE;
+            return 0;
+        }
+        buflen = PyBytes_GET_SIZE(u);
+        buf = PyBytes_AS_STRING(u);
+        newtok = PyMem_Malloc(buflen+1);
+        if (newtok == NULL) {
+            Py_DECREF(u);
+            tok->done = E_NOMEM;
+            return 0;
+        }
+        strcpy(newtok, buf);
+        Py_DECREF(u);
+    }
+    if (tok->fp_interactive &&
+        tok_concatenate_interactive_new_line(tok, newtok) == -1) {
+        PyMem_Free(newtok);
+        return 0;
+    }
+    if (tok->nextprompt != NULL) {
+        tok->prompt = tok->nextprompt;
+    }
+    if (newtok == NULL) {
+        tok->done = E_INTR;
+    }
+    else if (*newtok == '\0') {
+        PyMem_Free(newtok);
+        tok->done = E_EOF;
+    }
+    else if (tok->start != NULL) {
+        Py_ssize_t cur_multi_line_start = tok->multi_line_start - tok->buf;
+        _PyLexer_remember_fstring_buffers(tok);
+        size_t size = strlen(newtok);
+        ADVANCE_LINENO();
+        if (!_PyLexer_tok_reserve_buf(tok, size + 1)) {
+            PyMem_Free(tok->buf);
+            tok->buf = NULL;
+            PyMem_Free(newtok);
+            return 0;
+        }
+        memcpy(tok->cur, newtok, size + 1);
+        PyMem_Free(newtok);
+        tok->inp += size;
+        tok->multi_line_start = tok->buf + cur_multi_line_start;
+        _PyLexer_restore_fstring_buffers(tok);
+    }
+    else {
+        _PyLexer_remember_fstring_buffers(tok);
+        ADVANCE_LINENO();
+        PyMem_Free(tok->buf);
+        tok->buf = newtok;
+        tok->cur = tok->buf;
+        tok->line_start = tok->buf;
+        tok->inp = strchr(tok->buf, '\0');
+        tok->end = tok->inp + 1;
+        _PyLexer_restore_fstring_buffers(tok);
+    }
+    if (tok->done != E_OK) {
+        if (tok->prompt != NULL) {
+            PySys_WriteStderr("\n");
+        }
+        return 0;
+    }
+
+    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
+        return 0;
+    }
+    return 1;
+}
+
+static int
+tok_underflow_file(struct tok_state *tok) {
+    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
+        tok->cur = tok->inp = tok->buf;
+    }
+    if (tok->decoding_state == STATE_INIT) {
+        /* We have not yet determined the encoding.
+           If an encoding is found, use the file-pointer
+           reader functions from now on. */
+        if (!_PyTokenizer_check_bom(fp_getc, fp_ungetc, fp_setreadl, tok)) {
+            _PyTokenizer_error_ret(tok);
+            return 0;
+        }
+        assert(tok->decoding_state != STATE_INIT);
+    }
+    /* Read until '\n' or EOF */
+    if (tok->decoding_readline != NULL) {
+        /* We already have a codec associated with this input. */
+        if (!tok_readline_recode(tok)) {
+            return 0;
+        }
+    }
+    else {
+        /* We want a 'raw' read. */
+        if (!tok_readline_raw(tok)) {
+            return 0;
+        }
+    }
+    if (tok->inp == tok->cur) {
+        tok->done = E_EOF;
+        return 0;
+    }
+    tok->implicit_newline = 0;
+    if (tok->inp[-1] != '\n') {
+        assert(tok->inp + 1 < tok->end);
+        /* Last line does not end in \n, fake one */
+        *tok->inp++ = '\n';
+        *tok->inp = '\0';
+        tok->implicit_newline = 1;
+    }
+
+    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
+        return 0;
+    }
+
+    ADVANCE_LINENO();
+    if (tok->decoding_state != STATE_NORMAL) {
+        if (tok->lineno > 2) {
+            tok->decoding_state = STATE_NORMAL;
+        }
+        else if (!_PyTokenizer_check_coding_spec(tok->cur, strlen(tok->cur),
+                                    tok, fp_setreadl))
+        {
+            return 0;
+        }
+    }
+    /* The default encoding is UTF-8, so make sure we don't have any
+       non-UTF-8 sequences in it. */
+    if (!tok->encoding && !_PyTokenizer_ensure_utf8(tok->cur, tok)) {
+        _PyTokenizer_error_ret(tok);
+        return 0;
+    }
+    assert(tok->done == E_OK);
+    return tok->done == E_OK;
+}
+
+/* Set up tokenizer for file */
+struct tok_state *
+_PyTokenizer_FromFile(FILE *fp, const char* enc,
+                      const char *ps1, const char *ps2)
+{
+    struct tok_state *tok = _PyTokenizer_tok_new();
+    if (tok == NULL)
+        return NULL;
+    if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
+        _PyTokenizer_Free(tok);
+        return NULL;
+    }
+    tok->cur = tok->inp = tok->buf;
+    tok->end = tok->buf + BUFSIZ;
+    tok->fp = fp;
+    tok->prompt = ps1;
+    tok->nextprompt = ps2;
+    if (ps1 || ps2) {
+        tok->underflow = &tok_underflow_interactive;
+    } else {
+        tok->underflow = &tok_underflow_file;
+    }
+    if (enc != NULL) {
+        /* Must copy encoding declaration since it
+           gets copied into the parse tree. */
+        tok->encoding = _PyTokenizer_new_string(enc, strlen(enc), tok);
+        if (!tok->encoding) {
+            _PyTokenizer_Free(tok);
+            return NULL;
+        }
+        tok->decoding_state = STATE_NORMAL;
+    }
+    return tok;
+}
+
+#if defined(__wasi__) || (defined(__EMSCRIPTEN__) && (__EMSCRIPTEN_major__ >= 3))
+// fdopen() with borrowed fd. WASI does not provide dup() and Emscripten's
+// dup() emulation with open() is slow.
+typedef union {
+    void *cookie;
+    int fd;
+} borrowed;
+
+static ssize_t
+borrow_read(void *cookie, char *buf, size_t size)
+{
+    borrowed b = {.cookie = cookie};
+    return read(b.fd, (void *)buf, size);
+}
+
+static FILE *
+fdopen_borrow(int fd) {
+    // supports only reading. seek fails. close and write are no-ops.
+    cookie_io_functions_t io_cb = {borrow_read, NULL, NULL, NULL};
+    borrowed b = {.fd = fd};
+    return fopencookie(b.cookie, "r", io_cb);
+}
+#else
+static FILE *
+fdopen_borrow(int fd) {
+    fd = _Py_dup(fd);
+    if (fd < 0) {
+        return NULL;
+    }
+    return fdopen(fd, "r");
+}
+#endif
+
+/* Get the encoding of a Python file. Check for the coding cookie and check if
+   the file starts with a BOM.
+
+   _PyTokenizer_FindEncodingFilename() returns NULL when it can't find the
+   encoding in the first or second line of the file (in which case the encoding
+   should be assumed to be UTF-8).
+
+   The char* returned is malloc'ed via PyMem_Malloc() and thus must be freed
+   by the caller. */
+char *
+_PyTokenizer_FindEncodingFilename(int fd, PyObject *filename)
+{
+    struct tok_state *tok;
+    FILE *fp;
+    char *encoding = NULL;
+
+    fp = fdopen_borrow(fd);
+    if (fp == NULL) {
+        return NULL;
+    }
+    tok = _PyTokenizer_FromFile(fp, NULL, NULL, NULL);
+    if (tok == NULL) {
+        fclose(fp);
+        return NULL;
+    }
+    if (filename != NULL) {
+        tok->filename = Py_NewRef(filename);
+    }
+    else {
+        tok->filename = PyUnicode_FromString("<string>");
+        if (tok->filename == NULL) {
+            fclose(fp);
+            _PyTokenizer_Free(tok);
+            return encoding;
+        }
+    }
+    struct token token;
+    // We don't want to report warnings here because it could cause infinite
+    // recursion if fetching the encoding raises a warning.
+    tok->report_warnings = 0;
+    while (tok->lineno < 2 && tok->done == E_OK) {
+        _PyToken_Init(&token);
+        _PyTokenizer_Get(tok, &token);
+        _PyToken_Free(&token);
+    }
+    fclose(fp);
+    if (tok->encoding) {
+        encoding = (char *)PyMem_Malloc(strlen(tok->encoding) + 1);
+        if (encoding) {
+            strcpy(encoding, tok->encoding);
+        }
+    }
+    _PyTokenizer_Free(tok);
+    return encoding;
+}
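
The file wrapper is driven one token at a time. A minimal sketch of the calling convention, mirroring the loop in _PyTokenizer_FindEncodingFilename above; it assumes Parser/ and the internal headers are on the include path (this is internal CPython API, not a supported interface):

    #include "Python.h"
    #include "errcode.h"              // E_OK
    #include "pycore_token.h"         // struct token, ENDMARKER
    #include "lexer/state.h"          // struct tok_state
    #include "lexer/lexer.h"          // _PyTokenizer_Get()
    #include "tokenizer/tokenizer.h"  // _PyTokenizer_FromFile()

    /* Lex every token in an already-open file. The caller keeps
       ownership of fp and closes it afterwards, as
       _PyTokenizer_FindEncodingFilename does with fclose(). */
    static void
    lex_whole_file(FILE *fp)
    {
        struct tok_state *tok = _PyTokenizer_FromFile(fp, NULL, NULL, NULL);
        if (tok == NULL) {
            return;
        }
        struct token token;
        int type;
        do {
            _PyToken_Init(&token);
            type = _PyTokenizer_Get(tok, &token);   // one lexeme per call
            _PyToken_Free(&token);
        } while (type != ENDMARKER && tok->done == E_OK);
        _PyTokenizer_Free(tok);
    }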
diff --git a/Parser/tokenizer/helpers.c b/Parser/tokenizer/helpers.c
new file mode 100644 (file)
index 0000000..9c9d05b
--- /dev/null
@@ -0,0 +1,552 @@
+#include "Python.h"
+#include "errcode.h"
+#include "pycore_token.h"
+
+#include "../lexer/state.h"
+
+
+/* ############## ERRORS ############## */
+
+static int
+_syntaxerror_range(struct tok_state *tok, const char *format,
+                   int col_offset, int end_col_offset,
+                   va_list vargs)
+{
+    // In release builds, we don't want to overwrite a previous error; in debug
+    // builds we assert so that such cases are noticed and fixed.
+    assert(tok->done != E_ERROR);
+    if (tok->done == E_ERROR) {
+        return ERRORTOKEN;
+    }
+    PyObject *errmsg, *errtext, *args;
+    errmsg = PyUnicode_FromFormatV(format, vargs);
+    if (!errmsg) {
+        goto error;
+    }
+
+    errtext = PyUnicode_DecodeUTF8(tok->line_start, tok->cur - tok->line_start,
+                                   "replace");
+    if (!errtext) {
+        goto error;
+    }
+
+    if (col_offset == -1) {
+        col_offset = (int)PyUnicode_GET_LENGTH(errtext);
+    }
+    if (end_col_offset == -1) {
+        end_col_offset = col_offset;
+    }
+
+    Py_ssize_t line_len = strcspn(tok->line_start, "\n");
+    if (line_len != tok->cur - tok->line_start) {
+        Py_DECREF(errtext);
+        errtext = PyUnicode_DecodeUTF8(tok->line_start, line_len,
+                                       "replace");
+    }
+    if (!errtext) {
+        goto error;
+    }
+
+    args = Py_BuildValue("(O(OiiNii))", errmsg, tok->filename, tok->lineno,
+                         col_offset, errtext, tok->lineno, end_col_offset);
+    if (args) {
+        PyErr_SetObject(PyExc_SyntaxError, args);
+        Py_DECREF(args);
+    }
+
+error:
+    Py_XDECREF(errmsg);
+    tok->done = E_ERROR;
+    return ERRORTOKEN;
+}
+
+int
+_PyTokenizer_syntaxerror(struct tok_state *tok, const char *format, ...)
+{
+    // These errors are cleaned on startup. TODO: Fix it.
+    va_list vargs;
+    va_start(vargs, format);
+    int ret = _syntaxerror_range(tok, format, -1, -1, vargs);
+    va_end(vargs);
+    return ret;
+}
+
+int
+_PyTokenizer_syntaxerror_known_range(struct tok_state *tok,
+                        int col_offset, int end_col_offset,
+                        const char *format, ...)
+{
+    va_list vargs;
+    va_start(vargs, format);
+    int ret = _syntaxerror_range(tok, format, col_offset, end_col_offset, vargs);
+    va_end(vargs);
+    return ret;
+}
+
+int
+_PyTokenizer_indenterror(struct tok_state *tok)
+{
+    tok->done = E_TABSPACE;
+    tok->cur = tok->inp;
+    return ERRORTOKEN;
+}
+
+char *
+_PyTokenizer_error_ret(struct tok_state *tok) /* XXX */
+{
+    tok->decoding_erred = 1;
+    if ((tok->fp != NULL || tok->readline != NULL) && tok->buf != NULL) {/* see _PyTokenizer_Free */
+        PyMem_Free(tok->buf);
+    }
+    tok->buf = tok->cur = tok->inp = NULL;
+    tok->start = NULL;
+    tok->end = NULL;
+    tok->done = E_DECODE;
+    return NULL;                /* as if it were EOF */
+}
+
+int
+_PyTokenizer_warn_invalid_escape_sequence(struct tok_state *tok, int first_invalid_escape_char)
+{
+    if (!tok->report_warnings) {
+        return 0;
+    }
+
+    PyObject *msg = PyUnicode_FromFormat(
+        "invalid escape sequence '\\%c'",
+        (char) first_invalid_escape_char
+    );
+
+    if (msg == NULL) {
+        return -1;
+    }
+
+    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, tok->filename,
+                                 tok->lineno, NULL, NULL) < 0) {
+        Py_DECREF(msg);
+
+        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
+            /* Replace the SyntaxWarning exception with a SyntaxError
+               to get a more accurate error report */
+            PyErr_Clear();
+            return _PyTokenizer_syntaxerror(tok, "invalid escape sequence '\\%c'", (char) first_invalid_escape_char);
+        }
+
+        return -1;
+    }
+
+    Py_DECREF(msg);
+    return 0;
+}
+
+int
+_PyTokenizer_parser_warn(struct tok_state *tok, PyObject *category, const char *format, ...)
+{
+    if (!tok->report_warnings) {
+        return 0;
+    }
+
+    PyObject *errmsg;
+    va_list vargs;
+    va_start(vargs, format);
+    errmsg = PyUnicode_FromFormatV(format, vargs);
+    va_end(vargs);
+    if (!errmsg) {
+        goto error;
+    }
+
+    if (PyErr_WarnExplicitObject(category, errmsg, tok->filename,
+                                 tok->lineno, NULL, NULL) < 0) {
+        if (PyErr_ExceptionMatches(category)) {
+            /* Replace the DeprecationWarning exception with a SyntaxError
+               to get a more accurate error report */
+            PyErr_Clear();
+            _PyTokenizer_syntaxerror(tok, "%U", errmsg);
+        }
+        goto error;
+    }
+    Py_DECREF(errmsg);
+    return 0;
+
+error:
+    Py_XDECREF(errmsg);
+    tok->done = E_ERROR;
+    return -1;
+}
+
+
+/* ############## STRING MANIPULATION ############## */
+
+char *
+_PyTokenizer_new_string(const char *s, Py_ssize_t len, struct tok_state *tok)
+{
+    char* result = (char *)PyMem_Malloc(len + 1);
+    if (!result) {
+        tok->done = E_NOMEM;
+        return NULL;
+    }
+    memcpy(result, s, len);
+    result[len] = '\0';
+    return result;
+}
+
+PyObject *
+_PyTokenizer_translate_into_utf8(const char* str, const char* enc) {
+    PyObject *utf8;
+    PyObject* buf = PyUnicode_Decode(str, strlen(str), enc, NULL);
+    if (buf == NULL)
+        return NULL;
+    utf8 = PyUnicode_AsUTF8String(buf);
+    Py_DECREF(buf);
+    return utf8;
+}
+
+char *
+_PyTokenizer_translate_newlines(const char *s, int exec_input, int preserve_crlf,
+                   struct tok_state *tok) {
+    int skip_next_lf = 0;
+    size_t needed_length = strlen(s) + 2, final_length;
+    char *buf, *current;
+    char c = '\0';
+    buf = PyMem_Malloc(needed_length);
+    if (buf == NULL) {
+        tok->done = E_NOMEM;
+        return NULL;
+    }
+    for (current = buf; *s; s++, current++) {
+        c = *s;
+        if (skip_next_lf) {
+            skip_next_lf = 0;
+            if (c == '\n') {
+                c = *++s;
+                if (!c)
+                    break;
+            }
+        }
+        if (!preserve_crlf && c == '\r') {
+            skip_next_lf = 1;
+            c = '\n';
+        }
+        *current = c;
+    }
+    /* If this is exec input, add a newline to the end of the string if
+       there isn't one already. */
+    if (exec_input && c != '\n' && c != '\0') {
+        *current = '\n';
+        current++;
+    }
+    *current = '\0';
+    final_length = current - buf + 1;
+    if (final_length < needed_length && final_length) {
+        /* should never fail */
+        char* result = PyMem_Realloc(buf, final_length);
+        if (result == NULL) {
+            PyMem_Free(buf);
+        }
+        buf = result;
+    }
+    return buf;
+}
+
+/* ############## ENCODING STUFF ############## */
+
+
+/* See whether the file starts with a BOM. If it does,
+   invoke the set_readline function with the new encoding.
+   Return 1 on success, 0 on failure.  */
+int
+_PyTokenizer_check_bom(int get_char(struct tok_state *),
+          void unget_char(int, struct tok_state *),
+          int set_readline(struct tok_state *, const char *),
+          struct tok_state *tok)
+{
+    int ch1, ch2, ch3;
+    ch1 = get_char(tok);
+    tok->decoding_state = STATE_SEEK_CODING;
+    if (ch1 == EOF) {
+        return 1;
+    } else if (ch1 == 0xEF) {
+        ch2 = get_char(tok);
+        if (ch2 != 0xBB) {
+            unget_char(ch2, tok);
+            unget_char(ch1, tok);
+            return 1;
+        }
+        ch3 = get_char(tok);
+        if (ch3 != 0xBF) {
+            unget_char(ch3, tok);
+            unget_char(ch2, tok);
+            unget_char(ch1, tok);
+            return 1;
+        }
+    } else {
+        unget_char(ch1, tok);
+        return 1;
+    }
+    if (tok->encoding != NULL)
+        PyMem_Free(tok->encoding);
+    tok->encoding = _PyTokenizer_new_string("utf-8", 5, tok);
+    if (!tok->encoding)
+        return 0;
+    /* No need to set_readline: input is already utf-8 */
+    return 1;
+}
+
+static const char *
+get_normal_name(const char *s)  /* for utf-8 and latin-1 */
+{
+    char buf[13];
+    int i;
+    for (i = 0; i < 12; i++) {
+        int c = s[i];
+        if (c == '\0')
+            break;
+        else if (c == '_')
+            buf[i] = '-';
+        else
+            buf[i] = Py_TOLOWER(c);
+    }
+    buf[i] = '\0';
+    if (strcmp(buf, "utf-8") == 0 ||
+        strncmp(buf, "utf-8-", 6) == 0)
+        return "utf-8";
+    else if (strcmp(buf, "latin-1") == 0 ||
+             strcmp(buf, "iso-8859-1") == 0 ||
+             strcmp(buf, "iso-latin-1") == 0 ||
+             strncmp(buf, "latin-1-", 8) == 0 ||
+             strncmp(buf, "iso-8859-1-", 11) == 0 ||
+             strncmp(buf, "iso-latin-1-", 12) == 0)
+        return "iso-8859-1";
+    else
+        return s;
+}
+
+/* Look for a coding spec in S and store it in *spec (NULL if none is
+   found). Return 1 on success, 0 on failure (out of memory).  */
+static int
+get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *tok)
+{
+    Py_ssize_t i;
+    *spec = NULL;
+    /* Coding spec must be in a comment, and that comment must be
+     * the only statement on the source code line. */
+    for (i = 0; i < size - 6; i++) {
+        if (s[i] == '#')
+            break;
+        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
+            return 1;
+    }
+    for (; i < size - 6; i++) { /* XXX inefficient search */
+        const char* t = s + i;
+        if (memcmp(t, "coding", 6) == 0) {
+            const char* begin = NULL;
+            t += 6;
+            if (t[0] != ':' && t[0] != '=')
+                continue;
+            do {
+                t++;
+            } while (t[0] == ' ' || t[0] == '\t');
+
+            begin = t;
+            while (Py_ISALNUM(t[0]) ||
+                   t[0] == '-' || t[0] == '_' || t[0] == '.')
+                t++;
+
+            if (begin < t) {
+                char* r = _PyTokenizer_new_string(begin, t - begin, tok);
+                const char* q;
+                if (!r)
+                    return 0;
+                q = get_normal_name(r);
+                if (r != q) {
+                    PyMem_Free(r);
+                    r = _PyTokenizer_new_string(q, strlen(q), tok);
+                    if (!r)
+                        return 0;
+                }
+                *spec = r;
+                break;
+            }
+        }
+    }
+    return 1;
+}
+
+/* Check whether the line contains a coding spec. If it does,
+   invoke the set_readline function for the new encoding.
+   This function receives the tok_state and the new encoding.
+   Return 1 on success, 0 on failure.  */
+int
+_PyTokenizer_check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
+                  int set_readline(struct tok_state *, const char *))
+{
+    char *cs;
+    if (tok->cont_line) {
+        /* It's a continuation line, so it can't be a coding spec. */
+        tok->decoding_state = STATE_NORMAL;
+        return 1;
+    }
+    if (!get_coding_spec(line, &cs, size, tok)) {
+        return 0;
+    }
+    if (!cs) {
+        Py_ssize_t i;
+        for (i = 0; i < size; i++) {
+            if (line[i] == '#' || line[i] == '\n' || line[i] == '\r')
+                break;
+            if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') {
+                /* Stop checking coding spec after a line containing
+                 * anything except a comment. */
+                tok->decoding_state = STATE_NORMAL;
+                break;
+            }
+        }
+        return 1;
+    }
+    tok->decoding_state = STATE_NORMAL;
+    if (tok->encoding == NULL) {
+        assert(tok->decoding_readline == NULL);
+        if (strcmp(cs, "utf-8") != 0 && !set_readline(tok, cs)) {
+            _PyTokenizer_error_ret(tok);
+            PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
+            PyMem_Free(cs);
+            return 0;
+        }
+        tok->encoding = cs;
+    } else {                /* then, compare cs with BOM */
+        if (strcmp(tok->encoding, cs) != 0) {
+            _PyTokenizer_error_ret(tok);
+            PyErr_Format(PyExc_SyntaxError,
+                         "encoding problem: %s with BOM", cs);
+            PyMem_Free(cs);
+            return 0;
+        }
+        PyMem_Free(cs);
+    }
+    return 1;
+}
+
+/* Check whether the characters at s start a valid
+   UTF-8 sequence. Return the number of characters forming
+   the sequence if yes, 0 if not.  The special cases match
+   those in stringlib/codecs.h:utf8_decode.
+*/
+static int
+valid_utf8(const unsigned char* s)
+{
+    int expected = 0;
+    int length;
+    if (*s < 0x80) {
+        /* single-byte code */
+        return 1;
+    }
+    else if (*s < 0xE0) {
+        /* \xC2\x80-\xDF\xBF -- 0080-07FF */
+        if (*s < 0xC2) {
+            /* invalid sequence
+               \x80-\xBF -- continuation byte
+               \xC0-\xC1 -- fake 0000-007F */
+            return 0;
+        }
+        expected = 1;
+    }
+    else if (*s < 0xF0) {
+        /* \xE0\xA0\x80-\xEF\xBF\xBF -- 0800-FFFF */
+        if (*s == 0xE0 && *(s + 1) < 0xA0) {
+            /* invalid sequence
+               \xE0\x80\x80-\xE0\x9F\xBF -- fake 0000-0800 */
+            return 0;
+        }
+        else if (*s == 0xED && *(s + 1) >= 0xA0) {
+            /* Decoding UTF-8 sequences in range \xED\xA0\x80-\xED\xBF\xBF
+               will result in surrogates in range D800-DFFF. Surrogates are
+               not valid UTF-8 so they are rejected.
+               See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
+               (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt */
+            return 0;
+        }
+        expected = 2;
+    }
+    else if (*s < 0xF5) {
+        /* \xF0\x90\x80\x80-\xF4\x8F\xBF\xBF -- 10000-10FFFF */
+        if (*(s + 1) < 0x90 ? *s == 0xF0 : *s == 0xF4) {
+            /* invalid sequence -- one of:
+               \xF0\x80\x80\x80-\xF0\x8F\xBF\xBF -- fake 0000-FFFF
+               \xF4\x90\x80\x80- -- 110000- overflow */
+            return 0;
+        }
+        expected = 3;
+    }
+    else {
+        /* invalid start byte */
+        return 0;
+    }
+    length = expected + 1;
+    for (; expected; expected--)
+        if (s[expected] < 0x80 || s[expected] >= 0xC0)
+            return 0;
+    return length;
+}
+
+int
+_PyTokenizer_ensure_utf8(char *line, struct tok_state *tok)
+{
+    int badchar = 0;
+    unsigned char *c;
+    int length;
+    for (c = (unsigned char *)line; *c; c += length) {
+        if (!(length = valid_utf8(c))) {
+            badchar = *c;
+            break;
+        }
+    }
+    if (badchar) {
+        PyErr_Format(PyExc_SyntaxError,
+                     "Non-UTF-8 code starting with '\\x%.2x' "
+                     "in file %U on line %i, "
+                     "but no encoding declared; "
+                     "see https://peps.python.org/pep-0263/ for details",
+                     badchar, tok->filename, tok->lineno);
+        return 0;
+    }
+    return 1;
+}
+
+
+/* ############## DEBUGGING STUFF ############## */
+
+#ifdef Py_DEBUG
+void
+_PyTokenizer_print_escape(FILE *f, const char *s, Py_ssize_t size)
+{
+    if (s == NULL) {
+        fputs("NULL", f);
+        return;
+    }
+    putc('"', f);
+    while (size-- > 0) {
+        unsigned char c = *s++;
+        switch (c) {
+            case '\n': fputs("\\n", f); break;
+            case '\r': fputs("\\r", f); break;
+            case '\t': fputs("\\t", f); break;
+            case '\f': fputs("\\f", f); break;
+            case '\'': fputs("\\'", f); break;
+            case '"': fputs("\\\"", f); break;
+            default:
+                if (0x20 <= c && c <= 0x7f)
+                    putc(c, f);
+                else
+                    fprintf(f, "\\x%02x", c);
+        }
+    }
+    putc('"', f);
+}
+
+void
+_PyTokenizer_tok_dump(int type, char *start, char *end)
+{
+    fprintf(stderr, "%s", _PyParser_TokenNames[type]);
+    if (type == NAME || type == NUMBER || type == STRING || type == OP)
+        fprintf(stderr, "(%.*s)", (int)(end - start), start);
+}
+#endif
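
_PyTokenizer_check_bom is written against three caller-supplied callbacks, which is what lets one helper serve both the file-based and string-based wrappers. A sketch of that contract against a toy in-memory byte source (the demo_* names and buffer are illustrative only):

    #include "Python.h"
    #include "../lexer/state.h"    // struct tok_state
    #include "helpers.h"           // _PyTokenizer_check_bom()

    static const unsigned char demo_src[] = "\xEF\xBB\xBFx = 1\n";
    static size_t demo_pos = 0;

    static int
    demo_getc(struct tok_state *tok)
    {
        return demo_src[demo_pos] ? demo_src[demo_pos++] : EOF;
    }

    static void
    demo_ungetc(int c, struct tok_state *tok)
    {
        if (demo_pos > 0) {
            demo_pos--;            // step the cursor back over the last byte
        }
    }

    static int
    demo_setreadl(struct tok_state *tok, const char *enc)
    {
        return 1;                  // nothing to re-open for an in-memory buffer
    }

    static int
    demo_check(struct tok_state *tok)
    {
        // On success the three BOM bytes are consumed and tok->encoding
        // is set to "utf-8" (see _PyTokenizer_check_bom above).
        return _PyTokenizer_check_bom(demo_getc, demo_ungetc,
                                      demo_setreadl, tok);
    }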
diff --git a/Parser/tokenizer/helpers.h b/Parser/tokenizer/helpers.h
new file mode 100644 (file)
index 0000000..42ea13c
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _PY_TOKENIZER_HELPERS_H_
+#define _PY_TOKENIZER_HELPERS_H_
+
+#include "Python.h"
+
+#include "../lexer/state.h"
+
+#define ADVANCE_LINENO() \
+            tok->lineno++; \
+            tok->col_offset = 0;
+
+int _PyTokenizer_syntaxerror(struct tok_state *tok, const char *format, ...);
+int _PyTokenizer_syntaxerror_known_range(struct tok_state *tok, int col_offset, int end_col_offset, const char *format, ...);
+int _PyTokenizer_indenterror(struct tok_state *tok);
+int _PyTokenizer_warn_invalid_escape_sequence(struct tok_state *tok, int first_invalid_escape_char);
+int _PyTokenizer_parser_warn(struct tok_state *tok, PyObject *category, const char *format, ...);
+char *_PyTokenizer_error_ret(struct tok_state *tok);
+
+char *_PyTokenizer_new_string(const char *s, Py_ssize_t len, struct tok_state *tok);
+char *_PyTokenizer_translate_newlines(const char *s, int exec_input, int preserve_crlf, struct tok_state *tok);
+PyObject *_PyTokenizer_translate_into_utf8(const char* str, const char* enc);
+
+int _PyTokenizer_check_bom(int get_char(struct tok_state *),
+          void unget_char(int, struct tok_state *),
+          int set_readline(struct tok_state *, const char *),
+          struct tok_state *tok);
+int _PyTokenizer_check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
+                  int set_readline(struct tok_state *, const char *));
+int _PyTokenizer_ensure_utf8(char *line, struct tok_state *tok);
+
+#ifdef Py_DEBUG
+void _PyTokenizer_print_escape(FILE *f, const char *s, Py_ssize_t size);
+void _PyTokenizer_tok_dump(int type, char *start, char *end);
+#endif
+
+
+#endif
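
One thing to watch in this header: ADVANCE_LINENO() expands to two statements with no do { ... } while (0) guard, so each use site must be braced or stand alone as a full statement. A hypothetical unbraced use shows the hazard:

    if (at_line_boundary)          // at_line_boundary is illustrative
        ADVANCE_LINENO();          // only tok->lineno++ is guarded here;
                                   // tok->col_offset = 0 runs unconditionally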
diff --git a/Parser/tokenizer/readline_tokenizer.c b/Parser/tokenizer/readline_tokenizer.c
new file mode 100644 (file)
index 0000000..a2637af
--- /dev/null
@@ -0,0 +1,134 @@
+#include "Python.h"
+#include "errcode.h"
+
+#include "helpers.h"
+#include "../lexer/lexer.h"
+#include "../lexer/state.h"
+#include "../lexer/buffer.h"
+
+static int
+tok_readline_string(struct tok_state* tok) {
+    PyObject* line = NULL;
+    PyObject* raw_line = PyObject_CallNoArgs(tok->readline);
+    if (raw_line == NULL) {
+        if (PyErr_ExceptionMatches(PyExc_StopIteration)) {
+            PyErr_Clear();
+            return 1;
+        }
+        _PyTokenizer_error_ret(tok);
+        goto error;
+    }
+    if (tok->encoding != NULL) {
+        if (!PyBytes_Check(raw_line)) {
+            PyErr_Format(PyExc_TypeError, "readline() returned a non-bytes object");
+            _PyTokenizer_error_ret(tok);
+            goto error;
+        }
+        line = PyUnicode_Decode(PyBytes_AS_STRING(raw_line), PyBytes_GET_SIZE(raw_line),
+                                tok->encoding, "replace");
+        Py_CLEAR(raw_line);
+        if (line == NULL) {
+            _PyTokenizer_error_ret(tok);
+            goto error;
+        }
+    } else {
+        if (!PyUnicode_Check(raw_line)) {
+            PyErr_Format(PyExc_TypeError, "readline() returned a non-string object");
+            _PyTokenizer_error_ret(tok);
+            goto error;
+        }
+        line = raw_line;
+        raw_line = NULL;
+    }
+    Py_ssize_t buflen;
+    const char* buf = PyUnicode_AsUTF8AndSize(line, &buflen);
+    if (buf == NULL) {
+        _PyTokenizer_error_ret(tok);
+        goto error;
+    }
+
+    // Make room for the null terminator *and* potentially
+    // an extra newline character that we may need to artificially
+    // add.
+    size_t buffer_size = buflen + 2;
+    if (!_PyLexer_tok_reserve_buf(tok, buffer_size)) {
+        goto error;
+    }
+    memcpy(tok->inp, buf, buflen);
+    tok->inp += buflen;
+    *tok->inp = '\0';
+
+    tok->line_start = tok->cur;
+    Py_DECREF(line);
+    return 1;
+error:
+    Py_XDECREF(raw_line);
+    Py_XDECREF(line);
+    return 0;
+}
+
+static int
+tok_underflow_readline(struct tok_state* tok) {
+    assert(tok->decoding_state == STATE_NORMAL);
+    assert(tok->fp == NULL && tok->input == NULL && tok->decoding_readline == NULL);
+    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
+        tok->cur = tok->inp = tok->buf;
+    }
+    if (!tok_readline_string(tok)) {
+        return 0;
+    }
+    if (tok->inp == tok->cur) {
+        tok->done = E_EOF;
+        return 0;
+    }
+    tok->implicit_newline = 0;
+    if (tok->inp[-1] != '\n') {
+        assert(tok->inp + 1 < tok->end);
+        /* Last line does not end in \n, fake one */
+        *tok->inp++ = '\n';
+        *tok->inp = '\0';
+        tok->implicit_newline = 1;
+    }
+
+    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
+        return 0;
+    }
+
+    ADVANCE_LINENO();
+    /* The default encoding is UTF-8, so make sure we don't have any
+       non-UTF-8 sequences in it. */
+    if (!tok->encoding && !_PyTokenizer_ensure_utf8(tok->cur, tok)) {
+        _PyTokenizer_error_ret(tok);
+        return 0;
+    }
+    assert(tok->done == E_OK);
+    return tok->done == E_OK;
+}
+
+struct tok_state *
+_PyTokenizer_FromReadline(PyObject* readline, const char* enc,
+                          int exec_input, int preserve_crlf)
+{
+    struct tok_state *tok = _PyTokenizer_tok_new();
+    if (tok == NULL)
+        return NULL;
+    if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
+        _PyTokenizer_Free(tok);
+        return NULL;
+    }
+    tok->cur = tok->inp = tok->buf;
+    tok->end = tok->buf + BUFSIZ;
+    tok->fp = NULL;
+    if (enc != NULL) {
+        tok->encoding = _PyTokenizer_new_string(enc, strlen(enc), tok);
+        if (!tok->encoding) {
+            _PyTokenizer_Free(tok);
+            return NULL;
+        }
+    }
+    tok->decoding_state = STATE_NORMAL;
+    tok->underflow = &tok_underflow_readline;
+    Py_INCREF(readline);
+    tok->readline = readline;
+    return tok;
+}
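
The readline wrapper only needs a Python callable that returns one line per call (str, or bytes when enc is passed). A sketch of handing it the bound readline of a file-like object; the helper name is illustrative and error handling is elided:

    static struct tok_state *
    tok_from_file_like(PyObject *file_like)
    {
        PyObject *readline = PyObject_GetAttrString(file_like, "readline");
        if (readline == NULL) {
            return NULL;
        }
        struct tok_state *tok =
            _PyTokenizer_FromReadline(readline, NULL, /*exec_input=*/1,
                                      /*preserve_crlf=*/0);
        // _PyTokenizer_FromReadline took its own reference to readline.
        Py_DECREF(readline);
        return tok;
    }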
diff --git a/Parser/tokenizer/string_tokenizer.c b/Parser/tokenizer/string_tokenizer.c
new file mode 100644 (file)
index 0000000..0c26d5d
--- /dev/null
@@ -0,0 +1,129 @@
+#include "Python.h"
+#include "errcode.h"
+
+#include "helpers.h"
+#include "../lexer/state.h"
+
+static int
+tok_underflow_string(struct tok_state *tok) {
+    char *end = strchr(tok->inp, '\n');
+    if (end != NULL) {
+        end++;
+    }
+    else {
+        end = strchr(tok->inp, '\0');
+        if (end == tok->inp) {
+            tok->done = E_EOF;
+            return 0;
+        }
+    }
+    if (tok->start == NULL) {
+        tok->buf = tok->cur;
+    }
+    tok->line_start = tok->cur;
+    ADVANCE_LINENO();
+    tok->inp = end;
+    return 1;
+}
+
+/* Fetch a byte from TOK, using the string buffer. */
+static int
+buf_getc(struct tok_state *tok) {
+    return Py_CHARMASK(*tok->str++);
+}
+
+/* Unfetch a byte from TOK, using the string buffer. */
+static void
+buf_ungetc(int c, struct tok_state *tok) {
+    tok->str--;
+    assert(Py_CHARMASK(*tok->str) == c);        /* tok->cur may point to read-only segment */
+}
+
+/* Set the readline function for TOK to ENC. For the string-based
+   tokenizer, this means to just record the encoding. */
+static int
+buf_setreadl(struct tok_state *tok, const char* enc) {
+    tok->enc = enc;
+    return 1;
+}
+
+/* Decode a byte string STR for use as the buffer of TOK.
+   Look for encoding declarations inside STR, and record them
+   inside TOK.  */
+static char *
+decode_str(const char *input, int single, struct tok_state *tok, int preserve_crlf)
+{
+    PyObject* utf8 = NULL;
+    char *str;
+    const char *s;
+    const char *newl[2] = {NULL, NULL};
+    int lineno = 0;
+    tok->input = str = _PyTokenizer_translate_newlines(input, single, preserve_crlf, tok);
+    if (str == NULL)
+        return NULL;
+    tok->enc = NULL;
+    tok->str = str;
+    if (!_PyTokenizer_check_bom(buf_getc, buf_ungetc, buf_setreadl, tok))
+        return _PyTokenizer_error_ret(tok);
+    str = tok->str;             /* string after BOM if any */
+    assert(str);
+    if (tok->enc != NULL) {
+        utf8 = _PyTokenizer_translate_into_utf8(str, tok->enc);
+        if (utf8 == NULL)
+            return _PyTokenizer_error_ret(tok);
+        str = PyBytes_AsString(utf8);
+    }
+    for (s = str;; s++) {
+        if (*s == '\0') break;
+        else if (*s == '\n') {
+            assert(lineno < 2);
+            newl[lineno] = s;
+            lineno++;
+            if (lineno == 2) break;
+        }
+    }
+    tok->enc = NULL;
+    /* need to check line 1 and 2 separately since check_coding_spec
+       assumes a single line as input */
+    if (newl[0]) {
+        if (!_PyTokenizer_check_coding_spec(str, newl[0] - str, tok, buf_setreadl)) {
+            return NULL;
+        }
+        if (tok->enc == NULL && tok->decoding_state != STATE_NORMAL && newl[1]) {
+            if (!_PyTokenizer_check_coding_spec(newl[0]+1, newl[1] - newl[0],
+                                   tok, buf_setreadl))
+                return NULL;
+        }
+    }
+    if (tok->enc != NULL) {
+        assert(utf8 == NULL);
+        utf8 = _PyTokenizer_translate_into_utf8(str, tok->enc);
+        if (utf8 == NULL)
+            return _PyTokenizer_error_ret(tok);
+        str = PyBytes_AS_STRING(utf8);
+    }
+    assert(tok->decoding_buffer == NULL);
+    tok->decoding_buffer = utf8; /* CAUTION */
+    return str;
+}
+
+/* Set up tokenizer for string */
+struct tok_state *
+_PyTokenizer_FromString(const char *str, int exec_input, int preserve_crlf)
+{
+    struct tok_state *tok = _PyTokenizer_tok_new();
+    char *decoded;
+
+    if (tok == NULL)
+        return NULL;
+    decoded = decode_str(str, exec_input, tok, preserve_crlf);
+    if (decoded == NULL) {
+        _PyTokenizer_Free(tok);
+        return NULL;
+    }
+
+    tok->buf = tok->cur = tok->inp = decoded;
+    tok->end = decoded;
+    tok->underflow = &tok_underflow_string;
+    return tok;
+}
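
decode_str is what gives the string mode its PEP 263 behavior: a coding cookie in the first two lines recodes the whole buffer to UTF-8 before lexing starts. A sketch, illustrative only and with error handling elided:

    static void
    demo_coding_cookie(void)
    {
        const char *src =
            "# -*- coding: iso-8859-1 -*-\n"
            "s = 'caf\xe9'\n";             // 0xE9 is latin-1 e-acute
        struct tok_state *tok =
            _PyTokenizer_FromString(src, /*exec_input=*/1, /*preserve_crlf=*/0);
        if (tok != NULL) {
            // tok->encoding is now "iso-8859-1" and the internal buffer
            // has been recoded to UTF-8 (see check_coding_spec/decode_str).
            _PyTokenizer_Free(tok);
        }
    }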
diff --git a/Parser/tokenizer/tokenizer.h b/Parser/tokenizer/tokenizer.h
new file mode 100644 (file)
index 0000000..8fbeb2d
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef Py_TOKENIZER_H
+#define Py_TOKENIZER_H
+
+#include "Python.h"
+
+struct tok_state *_PyTokenizer_FromString(const char *, int, int);
+struct tok_state *_PyTokenizer_FromUTF8(const char *, int, int);
+struct tok_state *_PyTokenizer_FromReadline(PyObject*, const char*, int, int);
+struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
+                                              const char *, const char *);
+
+#define tok_dump _Py_tok_dump
+
+#endif /* !Py_TOKENIZER_H */
diff --git a/Parser/tokenizer/utf8_tokenizer.c b/Parser/tokenizer/utf8_tokenizer.c
new file mode 100644 (file)
index 0000000..1a925f4
--- /dev/null
@@ -0,0 +1,55 @@
+#include "Python.h"
+#include "errcode.h"
+
+#include "helpers.h"
+#include "../lexer/state.h"
+
+static int
+tok_underflow_string(struct tok_state *tok) {
+    char *end = strchr(tok->inp, '\n');
+    if (end != NULL) {
+        end++;
+    }
+    else {
+        end = strchr(tok->inp, '\0');
+        if (end == tok->inp) {
+            tok->done = E_EOF;
+            return 0;
+        }
+    }
+    if (tok->start == NULL) {
+        tok->buf = tok->cur;
+    }
+    tok->line_start = tok->cur;
+    ADVANCE_LINENO();
+    tok->inp = end;
+    return 1;
+}
+
+/* Set up tokenizer for UTF-8 string */
+struct tok_state *
+_PyTokenizer_FromUTF8(const char *str, int exec_input, int preserve_crlf)
+{
+    struct tok_state *tok = _PyTokenizer_tok_new();
+    char *translated;
+    if (tok == NULL)
+        return NULL;
+    tok->input = translated = _PyTokenizer_translate_newlines(str, exec_input, preserve_crlf, tok);
+    if (translated == NULL) {
+        _PyTokenizer_Free(tok);
+        return NULL;
+    }
+    tok->decoding_state = STATE_NORMAL;
+    tok->enc = NULL;
+    tok->str = translated;
+    tok->encoding = _PyTokenizer_new_string("utf-8", 5, tok);
+    if (!tok->encoding) {
+        _PyTokenizer_Free(tok);
+        return NULL;
+    }
+
+    tok->buf = tok->cur = tok->inp = translated;
+    tok->end = translated;
+    tok->underflow = &tok_underflow_string;
+    return tok;
+}
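
By contrast with the string wrapper, _PyTokenizer_FromUTF8 trusts the caller: no BOM or coding-cookie scan runs, and tok->encoding is pinned to "utf-8". A minimal sketch:

    static struct tok_state *
    demo_utf8(void)
    {
        // Input already known to be UTF-8; only newline translation
        // (via _PyTokenizer_translate_newlines) is applied.
        return _PyTokenizer_FromUTF8("x = 1\n", /*exec_input=*/1,
                                     /*preserve_crlf=*/0);
    }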
index 1b021069c5e10b77fff7c52304712e28feb3163e..83b4aa4b1a7e118fcf86c0b99e455d3a53e26bc8 100644 (file)
@@ -1,6 +1,8 @@
 #include "Python.h"
 #include "errcode.h"
-#include "../Parser/tokenizer.h"
+#include "../Parser/lexer/state.h"
+#include "../Parser/lexer/lexer.h"
+#include "../Parser/tokenizer/tokenizer.h"
 #include "../Parser/pegen.h"      // _PyPegen_byte_offset_to_character_offset()
 #include "../Parser/pegen.h"      // _PyPegen_byte_offset_to_character_offset()
 
index ca656f0760ab4fca41199e90c584d52ae109dae7..0ba7ab15ce89b8d8cdf57be968a038e991183bdb 100644 (file)
@@ -335,7 +335,7 @@ Objects/unicodeobject.c     unicode_encode_call_errorhandler        argparse        -
 Objects/unicodeobject.c        unicode_translate_call_errorhandler     argparse        -
 Parser/parser.c        -       reserved_keywords       -
 Parser/parser.c        -       soft_keywords   -
-Parser/tokenizer.c     -       type_comment_prefix     -
+Parser/lexer/lexer.c   -       type_comment_prefix     -
 Python/ast_opt.c       fold_unaryop    ops     -
 Python/ceval.c -       _PyEval_BinaryOps       -
 Python/ceval.c -       _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS -
index d68c4e8e9b0756492774e552dfd4ae8f5d173707..c87f518382f2ece0f898a875c212ef3309ffd98a 100755 (executable)
--- a/configure
+++ b/configure
@@ -26679,6 +26679,8 @@ SRCDIRS="\
   Modules/expat \
   Objects \
   Parser \
+  Parser/tokenizer \
+  Parser/lexer \
   Programs \
   Python \
   Python/frozen_modules \
index 9a9c76f3545c30a3261f1212aeac691bd6d94f08..cd69f0ede5449630628b471f69d6101fee7fdea1 100644 (file)
@@ -6526,6 +6526,8 @@ SRCDIRS="\
   Modules/expat \
   Objects \
   Parser \
+  Parser/tokenizer \
+  Parser/lexer \
   Programs \
   Python \
   Python/frozen_modules \