args: [--exit-non-zero-on-fix]
files: ^Lib/test/
- id: ruff
- name: Run Ruff (lint) on Tools/build/check_warnings.py
+ name: Run Ruff (lint) on Tools/build/
args: [--exit-non-zero-on-fix, --config=Tools/build/.ruff.toml]
- files: ^Tools/build/check_warnings.py
+ files: ^Tools/build/
- id: ruff
name: Run Ruff (lint) on Argument Clinic
args: [--exit-non-zero-on-fix, --config=Tools/clinic/.ruff.toml]
"W", # pycodestyle
"YTT", # flake8-2020
]
+ignore = [
+ "E501", # Line too long
+ "F541", # f-string without any placeholders
+ "PYI024", # Use `typing.NamedTuple` instead of `collections.namedtuple`
+ "PYI025", # Use `from collections.abc import Set as AbstractSet`
+ "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)`
+]
+
+[per-file-target-version]
+"deepfreeze.py" = "py310"
+"stable_abi.py" = "py311" # requires 'tomllib'
+
+[lint.per-file-ignores]
+"{check_extension_modules,freeze_modules}.py" = [
+ "UP031", # Use format specifiers instead of percent format
+]
+"generate_{re_casefix,sre_constants,token}.py" = [
+ "UP031", # Use format specifiers instead of percent format
+]
See --help for more information
"""
+import _imp
import argparse
import collections
import enum
import sys
import sysconfig
import warnings
-import _imp
-
+from collections.abc import Iterable
from importlib._bootstrap import _load as bootstrap_load
-from importlib.machinery import BuiltinImporter, ExtensionFileLoader, ModuleSpec
+from importlib.machinery import (
+ BuiltinImporter,
+ ExtensionFileLoader,
+ ModuleSpec,
+)
from importlib.util import spec_from_file_location, spec_from_loader
-from typing import Iterable
SRC_DIR = pathlib.Path(__file__).parent.parent.parent
# guarantee zip() doesn't drop anything
while len(names) % 3:
names.append("")
- for l, m, r in zip(names[::3], names[1::3], names[2::3]):
+ for l, m, r in zip(names[::3], names[1::3], names[2::3]): # noqa: E741
print("%-*s %-*s %-*s" % (longest, l, longest, m, longest, r))
if verbose and self.builtin_ok:
except ImportError as e:
logger.error("%s failed to import: %s", modinfo.name, e)
raise
- except Exception as e:
+ except Exception:
if not hasattr(_imp, 'create_dynamic'):
logger.warning("Dynamic extension '%s' ignored", modinfo.name)
return
import re
import time
import types
-from typing import Dict, FrozenSet, TextIO, Tuple
+from typing import TextIO
import umarshal
def get_localsplus_counts(code: types.CodeType,
- names: Tuple[str, ...],
- kinds: bytes) -> Tuple[int, int, int, int]:
+ names: tuple[str, ...],
+ kinds: bytes) -> tuple[int, int, int]:
nlocals = 0
ncellvars = 0
nfreevars = 0
PyUnicode_4BYTE_KIND = 4
-def analyze_character_width(s: str) -> Tuple[int, bool]:
+def analyze_character_width(s: str) -> tuple[int, bool]:
maxchar = ' '
for c in s:
maxchar = max(maxchar, c)
def __init__(self, file: TextIO) -> None:
self.level = 0
self.file = file
- self.cache: Dict[tuple[type, object, str], str] = {}
+ self.cache: dict[tuple[type, object, str], str] = {}
self.hits, self.misses = 0, 0
self.finis: list[str] = []
self.inits: list[str] = []
self.inits.append(f"_PyStaticCode_Init({name_as_code})")
return f"& {name}.ob_base.ob_base"
- def generate_tuple(self, name: str, t: Tuple[object, ...]) -> str:
+ def generate_tuple(self, name: str, t: tuple[object, ...]) -> str:
if len(t) == 0:
return f"(PyObject *)& _Py_SINGLETON(tuple_empty)"
items = [self.generate(f"{name}_{i}", it) for i, it in enumerate(t)]
self.write(f".cval = {{ {z.real}, {z.imag} }},")
return f"&{name}.ob_base"
- def generate_frozenset(self, name: str, fs: FrozenSet[object]) -> str:
+ def generate_frozenset(self, name: str, fs: frozenset[object]) -> str:
try:
fs = sorted(fs)
except TypeError:
printer = Printer(output)
for arg in args:
file, modname = arg.rsplit(':', 1)
- with open(file, "r", encoding="utf8") as fd:
+ with open(file, encoding="utf8") as fd:
source = fd.read()
if is_frozen_header(source):
code = decode_frozen_data(source)
if args.file:
if verbose:
print(f"Reading targets from {args.file}")
- with open(args.file, "rt", encoding="utf-8-sig") as fin:
+ with open(args.file, encoding="utf-8-sig") as fin:
rules = [x.strip() for x in fin]
else:
rules = args.args
See the notes at the top of Python/frozen.c for more info.
"""
-from collections import namedtuple
import hashlib
import ntpath
import os
import posixpath
+from collections import namedtuple
from update_file import updating_file_with_tmpfile
-
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
ROOT_DIR = os.path.abspath(ROOT_DIR)
FROZEN_ONLY = os.path.join(ROOT_DIR, 'Tools', 'freeze', 'flag.py')
header = relpath_for_posix_display(src.frozenfile, parentdir)
headerlines.append(f'#include "{header}"')
- externlines = UniqueList()
bootstraplines = []
stdliblines = []
testlines = []
def regen_pcbuild(modules):
projlines = []
filterlines = []
- corelines = []
for src in _iter_sources(modules):
pyfile = relpath_for_windows_display(src.pyfile, ROOT_DIR)
header = relpath_for_windows_display(src.frozenfile, ROOT_DIR)
break
else:
raise NotImplementedError
- assert nsmallposints and nsmallnegints
+ assert nsmallposints
+ assert nsmallnegints
# Then target the runtime initializer.
filename = os.path.join(INTERNAL, 'pycore_runtime_init_generated.h')
# To cover tricky cases (like "\n") we also generate C asserts.
raise ValueError(
'do not use &_Py_ID or &_Py_STR for one-character latin-1 '
- + f'strings, use _Py_LATIN1_CHR instead: {string!r}')
+ f'strings, use _Py_LATIN1_CHR instead: {string!r}')
if string not in strings:
strings[string] = name
elif name != strings[string]:
"""Generate 10,000 unique examples for the Levenshtein short-circuit tests."""
import argparse
-from functools import lru_cache
import json
import os.path
+from functools import lru_cache
from random import choices, randrange
-
# This should be in sync with Lib/traceback.py. It's not importing those values
# because this script is being executed by PYTHON_FOR_REGEN and not by the in-tree
# build of Python.
def update_file(file, content):
try:
- with open(file, 'r', encoding='utf-8') as fobj:
+ with open(file, encoding='utf-8') as fobj:
if fobj.read() == content:
return False
except (OSError, ValueError):
# List of codes of lowercased characters which have the same uppercase.
equivalent_lower_codes = [sorted(t)
for s in equivalent_chars
- for t in [set(ord(c.lower()) for c in s)]
+ for t in [{ord(c.lower()) for c in s}]
if len(t) > 1]
bad_codes = []
"""Tool for generating Software Bill of Materials (SBOM) for Python's dependencies"""
-import os
-import re
+
+import glob
import hashlib
import json
-import glob
-from pathlib import Path, PurePosixPath, PureWindowsPath
+import os
+import re
import subprocess
import sys
-import urllib.request
import typing
+import urllib.request
+from pathlib import Path, PurePosixPath, PureWindowsPath
CPYTHON_ROOT_DIR = Path(__file__).parent.parent.parent
license_concluded = package["licenseConcluded"]
error_if(
license_concluded != "NOASSERTION",
- f"License identifier must be 'NOASSERTION'"
+ "License identifier must be 'NOASSERTION'"
)
def update_file(file, content):
try:
- with open(file, 'r') as fobj:
+ with open(file) as fobj:
if fobj.read() == content:
return False
except (OSError, ValueError):
from check_extension_modules import ModuleChecker
-
SCRIPT_NAME = 'Tools/build/generate_stdlib_module_names.py'
SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
import re
-
SCRIPT_NAME = 'Tools/build/generate_token.py'
AUTO_GENERATED_BY_SCRIPT = f'Auto-generated by {SCRIPT_NAME}'
NT_OFFSET = 256
def update_file(file, content):
try:
- with open(file, 'r') as fobj:
+ with open(file) as fobj:
if fobj.read() == content:
return False
except (OSError, ValueError):
Written by Ezio Melotti and Iuliia Proskurnia.
"""
+import json
import os
import sys
-import json
-from urllib.request import urlopen
from html.entities import html5
+from urllib.request import urlopen
SCRIPT_NAME = 'Tools/build/parse_html5_entities.py'
PAGE_URL = 'https://html.spec.whatwg.org/multipage/named-characters.html'
"""Compare the old and new dicts and print the differences."""
added = new.keys() - old.keys()
if added:
- print('{} entitie(s) have been added:'.format(len(added)))
+ print(f'{len(added)} entitie(s) have been added:')
for name in sorted(added):
- print(' {!r}: {!r}'.format(name, new[name]))
+ print(f' {name!r}: {new[name]!r}')
removed = old.keys() - new.keys()
if removed:
- print('{} entitie(s) have been removed:'.format(len(removed)))
+ print(f'{len(removed)} entitie(s) have been removed:')
for name in sorted(removed):
- print(' {!r}: {!r}'.format(name, old[name]))
+ print(f' {name!r}: {old[name]!r}')
changed = set()
for name in (old.keys() & new.keys()):
if old[name] != new[name]:
changed.add((name, old[name], new[name]))
if changed:
- print('{} entitie(s) have been modified:'.format(len(changed)))
+ print(f'{len(changed)} entitie(s) have been modified:')
for item in sorted(changed):
print(' {!r}: {!r} -> {!r}'.format(*item))
print('The current dictionary is updated.')
else:
compare_dicts(html5, new_html5)
- print('Run "./python {0} --patch" to update Lib/html/entities.html '
- 'or "./python {0} --create" to see the generated ' 'dictionary.'.format(__file__))
+ print(f'Run "./python {__file__} --patch" to update Lib/html/entities.html '
+ f'or "./python {__file__} --create" to see the generated dictionary.')
import sys
import sysconfig
-
ALLOWED_PREFIXES = ('Py', '_Py')
if sys.platform == 'darwin':
ALLOWED_PREFIXES += ('__Py',)
if dynamic:
args.append('--dynamic')
args.append(library)
- print("+ %s" % ' '.join(args))
- proc = subprocess.run(args, stdout=subprocess.PIPE, universal_newlines=True)
+ print(f"+ {' '.join(args)}")
+ proc = subprocess.run(args, stdout=subprocess.PIPE, encoding='utf-8')
if proc.returncode:
sys.stdout.write(proc.stdout)
sys.exit(proc.returncode)
symtype = parts[1].strip()
symbol = parts[-1]
- result = '%s (type: %s)' % (symbol, symtype)
+ result = f'{symbol} (type: {symtype})'
if (symbol.startswith(ALLOWED_PREFIXES) or
symbol in EXCEPTIONS or
print()
smelly_symbols.sort()
for symbol in smelly_symbols:
- print("Smelly symbol: %s" % symbol)
+ print(f"Smelly symbol: {symbol}")
print()
- print("ERROR: Found %s smelly symbols!" % len(smelly_symbols))
+ print(f"ERROR: Found {len(smelly_symbols)} smelly symbols!")
return len(smelly_symbols)
(relative to the manifest file, as they appear in the CPython codebase).
"""
-from functools import partial
-from pathlib import Path
-import dataclasses
-import subprocess
-import sysconfig
import argparse
-import textwrap
-import tomllib
+import csv
+import dataclasses
import difflib
-import pprint
-import sys
+import io
import os
import os.path
-import io
+import pprint
import re
-import csv
+import subprocess
+import sys
+import sysconfig
+import textwrap
+import tomllib
+from functools import partial
+from pathlib import Path
SCRIPT_NAME = 'Tools/build/stable_abi.py'
DEFAULT_MANIFEST_PATH = (
class Manifest:
"""Collection of `ABIItem`s forming the stable ABI/limited API."""
def __init__(self):
- self.contents = dict()
+ self.contents = {}
def add(self, item):
if item.name in self.contents:
# Get all macros first: we'll need feature macros like HAVE_FORK and
# MS_WINDOWS for everything else
present_macros = gcc_get_limited_api_macros(['Include/Python.h'])
- feature_macros = set(m.name for m in manifest.select({'feature_macro'}))
+ feature_macros = {m.name for m in manifest.select({'feature_macro'})}
feature_macros &= present_macros
# Check that we have all needed macros
- expected_macros = set(
- item.name for item in manifest.select({'macro'})
- )
+ expected_macros = {item.name for item in manifest.select({'macro'})}
missing_macros = expected_macros - present_macros
okay &= _report_unexpected_items(
missing_macros,
- 'Some macros from are not defined from "Include/Python.h"'
- + 'with Py_LIMITED_API:')
+        'Some macros are not defined in "Include/Python.h" '
+        'with Py_LIMITED_API:')
- expected_symbols = set(item.name for item in manifest.select(
+ expected_symbols = {item.name for item in manifest.select(
{'function', 'data'}, include_abi_only=True, ifdef=feature_macros,
- ))
+ )}
# Check the static library (*.a)
LIBRARY = sysconfig.get_config_var("LIBRARY")
manifest, LDLIBRARY, expected_symbols, dynamic=False)
# Check definitions in the header files
- expected_defs = set(item.name for item in manifest.select(
+ expected_defs = {item.name for item in manifest.select(
{'function', 'data'}, include_abi_only=False, ifdef=feature_macros,
- ))
+ )}
found_defs = gcc_get_limited_api_definitions(['Include/Python.h'])
missing_defs = expected_defs - found_defs
okay &= _report_unexpected_items(
missing_defs,
'Some expected declarations were not declared in '
- + '"Include/Python.h" with Py_LIMITED_API:')
+ '"Include/Python.h" with Py_LIMITED_API:')
# Some Limited API macros are defined in terms of private symbols.
# These are not part of Limited API (even though they're defined with
okay &= _report_unexpected_items(
extra_defs,
'Some extra declarations were found in "Include/Python.h" '
- + 'with Py_LIMITED_API:')
+ 'with Py_LIMITED_API:')
return okay
if dynamic:
args.append("--dynamic")
args.append(library)
- proc = subprocess.run(args, stdout=subprocess.PIPE, universal_newlines=True)
+ proc = subprocess.run(args, stdout=subprocess.PIPE, encoding='utf-8')
if proc.returncode:
sys.stdout.write(proc.stdout)
sys.exit(proc.returncode)
"-E",
]
+ [str(file) for file in headers],
- text=True,
+ encoding='utf-8',
)
- return {
- target
- for target in re.findall(
- r"#define (\w+)", preprocessor_output_with_macros
- )
- }
+ return set(re.findall(r"#define (\w+)", preprocessor_output_with_macros))
def gcc_get_limited_api_definitions(headers):
"-E",
]
+ [str(file) for file in headers],
- text=True,
+ encoding='utf-8',
stderr=subprocess.DEVNULL,
)
stable_functions = set(
if name.startswith('_') and not item.abi_only:
raise ValueError(
f'`{name}` is private (underscore-prefixed) and should be '
- + 'removed from the stable ABI list or marked `abi_only`')
+ 'removed from the stable ABI list or marked `abi_only`')
def check_dump(manifest, filename):
"""Check that manifest.dump() corresponds to the data.
with filename.open('rb') as file:
from_file = tomllib.load(file)
if dumped != from_file:
- print(f'Dump differs from loaded data!', file=sys.stderr)
+ print('Dump differs from loaded data!', file=sys.stderr)
diff = difflib.unified_diff(
pprint.pformat(dumped).splitlines(),
pprint.pformat(from_file).splitlines(),
parser.add_argument(
"--generate-all", action='store_true',
help="as --generate, but generate all file(s) using default filenames."
- + " (unlike --all, does not run any extra checks)",
+ " (unlike --all, does not run any extra checks)",
)
parser.add_argument(
"-a", "--all", action='store_true',
if not results:
if args.generate:
parser.error('No file specified. Use --generate-all to regenerate '
- + 'all files, or --help for usage.')
+ 'all files, or --help for usage.')
parser.error('No check specified. Use --all to check all files, '
- + 'or --help for usage.')
+ 'or --help for usage.')
failed_results = [name for name, result in results.items() if not result]
# Implementation of marshal.loads() in pure Python
import ast
-
-from typing import Any, Tuple
+from typing import Any
class Type:
def __repr__(self) -> str:
return f"Code(**{self.__dict__})"
- co_localsplusnames: Tuple[str]
- co_localspluskinds: Tuple[int]
+ co_localsplusnames: tuple[str, ...]
+ co_localspluskinds: tuple[int, ...]
- def get_localsplus_names(self, select_kind: int) -> Tuple[str, ...]:
+ def get_localsplus_names(self, select_kind: int) -> tuple[str, ...]:
varnames: list[str] = []
for name, kind in zip(self.co_localsplusnames,
self.co_localspluskinds):
return tuple(varnames)
@property
- def co_varnames(self) -> Tuple[str, ...]:
+ def co_varnames(self) -> tuple[str, ...]:
return self.get_localsplus_names(CO_FAST_LOCAL)
@property
- def co_cellvars(self) -> Tuple[str, ...]:
+ def co_cellvars(self) -> tuple[str, ...]:
return self.get_localsplus_names(CO_FAST_CELL)
@property
- def co_freevars(self) -> Tuple[str, ...]:
+ def co_freevars(self) -> tuple[str, ...]:
return self.get_localsplus_names(CO_FAST_FREE)
@property
def main():
# Test
- import marshal, pprint
+ import marshal
+ import pprint
sample = {'foo': {(42, "bar", 3.14)}}
data = marshal.dumps(sample)
retval = loads(data)