[3.12] gh-110756: Sync regrtest with main branch (#110758)
author     Victor Stinner <vstinner@python.org>  Thu, 12 Oct 2023 20:03:07 +0000 (22:03 +0200)
committer  GitHub <noreply@github.com>  Thu, 12 Oct 2023 20:03:07 +0000 (22:03 +0200)
gh-110756: Sync regrtest with main branch

Copy files from main to this branch:

* Lib/test/libregrtest/*.py
* Lib/test/__init__.py
* Lib/test/__main__.py
* Lib/test/autotest.py
* Lib/test/pythoninfo.py
* Lib/test/regrtest.py
* Lib/test/test_regrtest.py

Do not modify scripts that run tests, such as Makefile.pre.in,
.github/workflows/build.yml and Tools/scripts/run_tests.py: this change
does not use --fast-ci or --slow-ci.

Changes:

* SPLITTESTDIRS: don't include test_inspect.
* Add utils.process_cpu_count() using len(os.sched_getaffinity(0)) (see
  the sketch after this list).
* test_regrtest no longer uses @support.without_optimizer, which doesn't
  exist in Python 3.12.
* Add support.set_sanitizer_env_var().
* Update test_faulthandler to use support.set_sanitizer_env_var().
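
A minimal sketch of the new helper described above, based only on the commit
message; the real implementation lives in Lib/test/libregrtest/utils.py and
may differ (the fallback to os.cpu_count() is an assumption):

    import os

    def process_cpu_count() -> int | None:
        # Prefer the number of CPUs usable by this process (its affinity
        # mask); fall back to the machine-wide count where
        # sched_getaffinity() is unavailable (assumed behaviour).
        if hasattr(os, "sched_getaffinity"):
            return len(os.sched_getaffinity(0))
        return os.cpu_count()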

23 files changed:
Lib/test/__main__.py
Lib/test/autotest.py
Lib/test/libregrtest/__init__.py
Lib/test/libregrtest/cmdline.py
Lib/test/libregrtest/findtests.py [new file with mode: 0644]
Lib/test/libregrtest/logger.py [new file with mode: 0644]
Lib/test/libregrtest/main.py
Lib/test/libregrtest/pgo.py
Lib/test/libregrtest/refleak.py
Lib/test/libregrtest/result.py [new file with mode: 0644]
Lib/test/libregrtest/results.py [new file with mode: 0644]
Lib/test/libregrtest/run_workers.py [new file with mode: 0644]
Lib/test/libregrtest/runtests.py [new file with mode: 0644]
Lib/test/libregrtest/save_env.py
Lib/test/libregrtest/setup.py
Lib/test/libregrtest/single.py [new file with mode: 0644]
Lib/test/libregrtest/utils.py
Lib/test/libregrtest/worker.py [new file with mode: 0644]
Lib/test/pythoninfo.py
Lib/test/regrtest.py
Lib/test/support/__init__.py
Lib/test/test_faulthandler.py
Lib/test/test_regrtest.py

diff --git a/Lib/test/__main__.py b/Lib/test/__main__.py
index 19a6b2b8904526ace5ada89879ac66810676b321..82b50ad2c6e7773e2dae9c5e4733e8b4d9c41f99 100644 (file)
@@ -1,2 +1,2 @@
-from test.libregrtest import main
-main()
+from test.libregrtest.main import main
+main(_add_python_opts=True)
diff --git a/Lib/test/autotest.py b/Lib/test/autotest.py
index fa85cc153a133aaac6a7bdeda496a92f4d2bcee1..b5a1fab404c72d75a354bcd479450e498941e915 100644 (file)
@@ -1,5 +1,5 @@
 # This should be equivalent to running regrtest.py from the cmdline.
 # It can be especially handy if you're in an interactive shell, e.g.,
 # from test import autotest.
-from test.libregrtest import main
+from test.libregrtest.main import main
 main()
diff --git a/Lib/test/libregrtest/__init__.py b/Lib/test/libregrtest/__init__.py
index 5e8dba5dbde71a2b4075c47eac5c8c95997729cb..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 (file)
@@ -1,2 +0,0 @@
-from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
-from test.libregrtest.main import main
diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
index d1a590d8c1a5b31f1633bf123453e20de7ab6cdf..dd4cd335bef7e3e31ca6c8f44efd0ffa71283e4c 100644 (file)
@@ -1,8 +1,9 @@
 import argparse
-import os
+import os.path
 import shlex
 import sys
 from test.support import os_helper
+from .utils import ALL_RESOURCES, RESOURCE_NAMES
 
 
 USAGE = """\
@@ -27,8 +28,10 @@ EPILOG = """\
 Additional option details:
 
 -r randomizes test execution order. You can use --randseed=int to provide an
-int seed value for the randomizer; this is useful for reproducing troublesome
-test orders.
+int seed value for the randomizer. The randseed value is used to seed
+all random usage in tests (including randomizing the test order when -r
+is set). By default a random seed is always set, but the test order is
+not randomized.
 
 -s On the first invocation of regrtest using -s, the first test file found
 or the first test file given on the command line is run, and the name of
@@ -130,25 +133,17 @@ Pattern examples:
 """
 
 
-ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
-                 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime')
-
-# Other resources excluded from --use=all:
-#
-# - extralagefile (ex: test_zipfile64): really too slow to be enabled
-#   "by default"
-# - tzdata: while needed to validate fully test_datetime, it makes
-#   test_datetime too slow (15-20 min on some buildbots) and so is disabled by
-#   default (see bpo-30822).
-RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
-
-
 class Namespace(argparse.Namespace):
     def __init__(self, **kwargs) -> None:
+        self.ci = False
         self.testdir = None
         self.verbose = 0
         self.quiet = False
         self.exclude = False
+        self.cleanup = False
+        self.wait = False
+        self.list_cases = False
+        self.list_tests = False
         self.single = False
         self.randomize = False
         self.fromfile = None
@@ -157,7 +152,7 @@ class Namespace(argparse.Namespace):
         self.trace = False
         self.coverdir = 'coverage'
         self.runleaks = False
-        self.huntrleaks = False
+        self.huntrleaks: tuple[int, int, str] | None = None
         self.rerun = False
         self.verbose3 = False
         self.print_slow = False
@@ -170,6 +165,14 @@ class Namespace(argparse.Namespace):
         self.ignore_tests = None
         self.pgo = False
         self.pgo_extended = False
+        self.worker_json = None
+        self.start = None
+        self.timeout = None
+        self.memlimit = None
+        self.threshold = None
+        self.fail_rerun = False
+        self.tempdir = None
+        self._add_python_opts = True
 
         super().__init__(**kwargs)
 
@@ -198,19 +201,27 @@ def _create_parser():
     # We add help explicitly to control what argument group it renders under.
     group.add_argument('-h', '--help', action='help',
                        help='show this help message and exit')
-    group.add_argument('--timeout', metavar='TIMEOUT', type=float,
+    group.add_argument('--fast-ci', action='store_true',
+                       help='Fast Continuous Integration (CI) mode used by '
+                            'GitHub Actions')
+    group.add_argument('--slow-ci', action='store_true',
+                       help='Slow Continuous Integration (CI) mode used by '
+                            'buildbot workers')
+    group.add_argument('--timeout', metavar='TIMEOUT',
                         help='dump the traceback and exit if a test takes '
                              'more than TIMEOUT seconds; disabled if TIMEOUT '
                              'is negative or equals to zero')
     group.add_argument('--wait', action='store_true',
                        help='wait for user input, e.g., allow a debugger '
                             'to be attached')
-    group.add_argument('--worker-args', metavar='ARGS')
     group.add_argument('-S', '--start', metavar='START',
                        help='the name of the test at which to start.' +
                             more_details)
     group.add_argument('-p', '--python', metavar='PYTHON',
                        help='Command to run Python test subprocesses with.')
+    group.add_argument('--randseed', metavar='SEED',
+                       dest='random_seed', type=int,
+                       help='pass a global random seed')
 
     group = parser.add_argument_group('Verbosity')
     group.add_argument('-v', '--verbose', action='count',
@@ -231,10 +242,6 @@ def _create_parser():
     group = parser.add_argument_group('Selecting tests')
     group.add_argument('-r', '--randomize', action='store_true',
                        help='randomize test execution order.' + more_details)
-    group.add_argument('--randseed', metavar='SEED',
-                       dest='random_seed', type=int,
-                       help='pass a random seed to reproduce a previous '
-                            'random run')
     group.add_argument('-f', '--fromfile', metavar='FILE',
                        help='read names of tests to run from a file.' +
                             more_details)
@@ -324,6 +331,9 @@ def _create_parser():
                        help='override the working directory for the test run')
     group.add_argument('--cleanup', action='store_true',
                        help='remove old test_python_* directories')
+    group.add_argument('--dont-add-python-opts', dest='_add_python_opts',
+                       action='store_false',
+                       help="internal option, don't use it")
     return parser
 
 
@@ -374,7 +384,50 @@ def _parse_args(args, **kwargs):
     for arg in ns.args:
         if arg.startswith('-'):
             parser.error("unrecognized arguments: %s" % arg)
-            sys.exit(1)
+
+    if ns.timeout is not None:
+        # Support "--timeout=" (no value) so Makefile.pre.in TESTTIMEOUT
+        # can be used by "make buildbottest" and "make test".
+        if ns.timeout != "":
+            try:
+                ns.timeout = float(ns.timeout)
+            except ValueError:
+                parser.error(f"invalid timeout value: {ns.timeout!r}")
+        else:
+            ns.timeout = None
+
+    # Continuous Integration (CI): common options for fast/slow CI modes
+    if ns.slow_ci or ns.fast_ci:
+        # Similar to options:
+        #
+        #     -j0 --randomize --fail-env-changed --fail-rerun --rerun
+        #     --slowest --verbose3
+        if ns.use_mp is None:
+            ns.use_mp = 0
+        ns.randomize = True
+        ns.fail_env_changed = True
+        ns.fail_rerun = True
+        if ns.python is None:
+            ns.rerun = True
+        ns.print_slow = True
+        ns.verbose3 = True
+    else:
+        ns._add_python_opts = False
+
+    # When both --slow-ci and --fast-ci options are present,
+    # --slow-ci has priority
+    if ns.slow_ci:
+        # Similar to: -u "all" --timeout=1200
+        if not ns.use:
+            ns.use = [['all']]
+        if ns.timeout is None:
+            ns.timeout = 1200  # 20 minutes
+    elif ns.fast_ci:
+        # Similar to: -u "all,-cpu" --timeout=600
+        if not ns.use:
+            ns.use = [['all', '-cpu']]
+        if ns.timeout is None:
+            ns.timeout = 600  # 10 minutes
 
     if ns.single and ns.fromfile:
         parser.error("-s and -f don't go together!")
@@ -401,10 +454,6 @@ def _parse_args(args, **kwargs):
     if ns.timeout is not None:
         if ns.timeout <= 0:
             ns.timeout = None
-    if ns.use_mp is not None:
-        if ns.use_mp <= 0:
-            # Use all cores + extras for tests that like to sleep
-            ns.use_mp = 2 + (os.cpu_count() or 1)
     if ns.use:
         for a in ns.use:
             for r in a:
@@ -448,4 +497,13 @@ def _parse_args(args, **kwargs):
         # --forever implies --failfast
         ns.failfast = True
 
+    if ns.huntrleaks:
+        warmup, repetitions, _ = ns.huntrleaks
+        if warmup < 1 or repetitions < 1:
+            msg = ("Invalid values for the --huntrleaks/-R parameters. The "
+                   "number of warmups and repetitions must be at least 1 "
+                   "each (1:1).")
+            print(msg, file=sys.stderr, flush=True)
+            sys.exit(2)
+
     return ns
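
A hedged usage sketch of the new --fast-ci/--slow-ci handling shown in the
cmdline.py hunk above (assumes a CPython 3.12 checkout with this change
applied, so that test.libregrtest is importable); illustration only, not
part of the commit:

    from test.libregrtest.cmdline import _parse_args

    # --fast-ci expands to roughly: -j0 --randomize --fail-env-changed
    # --fail-rerun --rerun --slowest --verbose3 -u all,-cpu --timeout=600
    ns = _parse_args(["--fast-ci"])
    print(ns.use_mp, ns.randomize, ns.fail_env_changed, ns.fail_rerun)
    print(ns.timeout)  # 600 seconds (10 minutes) unless --timeout was given
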
diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py
new file mode 100644 (file)
index 0000000..96cc3e0
--- /dev/null
@@ -0,0 +1,105 @@
+import os
+import sys
+import unittest
+
+from test import support
+
+from .utils import (
+    StrPath, TestName, TestTuple, TestList, FilterTuple,
+    abs_module_name, count, printlist)
+
+
+# If these test directories are encountered, recurse into them and treat each
+# "test_*.py" file or each sub-directory as a separate test module. This can
+# increase parallelism.
+#
+# Beware this can't generally be done for any directory with sub-tests as the
+# __init__.py may do things which alter what tests are to be run.
+SPLITTESTDIRS: set[TestName] = {
+    "test_asyncio",
+    "test_concurrent_futures",
+    "test_future_stmt",
+    "test_gdb",
+    "test_multiprocessing_fork",
+    "test_multiprocessing_forkserver",
+    "test_multiprocessing_spawn",
+}
+
+
+def findtestdir(path: StrPath | None = None) -> StrPath:
+    return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
+
+
+def findtests(*, testdir: StrPath | None = None, exclude=(),
+              split_test_dirs: set[TestName] = SPLITTESTDIRS,
+              base_mod: str = "") -> TestList:
+    """Return a list of all applicable test modules."""
+    testdir = findtestdir(testdir)
+    tests = []
+    for name in os.listdir(testdir):
+        mod, ext = os.path.splitext(name)
+        if (not mod.startswith("test_")) or (mod in exclude):
+            continue
+        if base_mod:
+            fullname = f"{base_mod}.{mod}"
+        else:
+            fullname = mod
+        if fullname in split_test_dirs:
+            subdir = os.path.join(testdir, mod)
+            if not base_mod:
+                fullname = f"test.{mod}"
+            tests.extend(findtests(testdir=subdir, exclude=exclude,
+                                   split_test_dirs=split_test_dirs,
+                                   base_mod=fullname))
+        elif ext in (".py", ""):
+            tests.append(fullname)
+    return sorted(tests)
+
+
+def split_test_packages(tests, *, testdir: StrPath | None = None, exclude=(),
+                        split_test_dirs=SPLITTESTDIRS):
+    testdir = findtestdir(testdir)
+    splitted = []
+    for name in tests:
+        if name in split_test_dirs:
+            subdir = os.path.join(testdir, name)
+            splitted.extend(findtests(testdir=subdir, exclude=exclude,
+                                      split_test_dirs=split_test_dirs,
+                                      base_mod=name))
+        else:
+            splitted.append(name)
+    return splitted
+
+
+def _list_cases(suite):
+    for test in suite:
+        if isinstance(test, unittest.loader._FailedTest):
+            continue
+        if isinstance(test, unittest.TestSuite):
+            _list_cases(test)
+        elif isinstance(test, unittest.TestCase):
+            if support.match_test(test):
+                print(test.id())
+
+def list_cases(tests: TestTuple, *,
+               match_tests: FilterTuple | None = None,
+               ignore_tests: FilterTuple | None = None,
+               test_dir: StrPath | None = None):
+    support.verbose = False
+    support.set_match_tests(match_tests, ignore_tests)
+
+    skipped = []
+    for test_name in tests:
+        module_name = abs_module_name(test_name, test_dir)
+        try:
+            suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
+            _list_cases(suite)
+        except unittest.SkipTest:
+            skipped.append(test_name)
+
+    if skipped:
+        sys.stdout.flush()
+        stderr = sys.stderr
+        print(file=stderr)
+        print(count(len(skipped), "test"), "skipped:", file=stderr)
+        printlist(skipped, file=stderr)
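
A hedged usage sketch of the findtests helpers added above (assumes the
synced Lib/test tree is importable; the exact names returned depend on the
local checkout); illustration only, not part of the commit:

    from test.libregrtest.findtests import findtests, split_test_packages

    # Enumerate all test modules; directories listed in SPLITTESTDIRS
    # (e.g. test_asyncio) are expanded into one entry per test_* submodule
    # so that they can run in parallel.
    all_tests = findtests()
    expanded = split_test_packages(["test_asyncio", "test_os"])
    print(len(all_tests), expanded[:3])
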
diff --git a/Lib/test/libregrtest/logger.py b/Lib/test/libregrtest/logger.py
new file mode 100644 (file)
index 0000000..a125706
--- /dev/null
@@ -0,0 +1,86 @@
+import os
+import time
+
+from test.support import MS_WINDOWS
+from .results import TestResults
+from .runtests import RunTests
+from .utils import print_warning
+
+if MS_WINDOWS:
+    from .win_utils import WindowsLoadTracker
+
+
+class Logger:
+    def __init__(self, results: TestResults, quiet: bool, pgo: bool):
+        self.start_time = time.perf_counter()
+        self.test_count_text = ''
+        self.test_count_width = 3
+        self.win_load_tracker: WindowsLoadTracker | None = None
+        self._results: TestResults = results
+        self._quiet: bool = quiet
+        self._pgo: bool = pgo
+
+    def log(self, line: str = '') -> None:
+        empty = not line
+
+        # add the system load prefix: "load avg: 1.80 "
+        load_avg = self.get_load_avg()
+        if load_avg is not None:
+            line = f"load avg: {load_avg:.2f} {line}"
+
+        # add the timestamp prefix:  "0:01:05 "
+        log_time = time.perf_counter() - self.start_time
+
+        mins, secs = divmod(int(log_time), 60)
+        hours, mins = divmod(mins, 60)
+        formatted_log_time = "%d:%02d:%02d" % (hours, mins, secs)
+
+        line = f"{formatted_log_time} {line}"
+        if empty:
+            line = line[:-1]
+
+        print(line, flush=True)
+
+    def get_load_avg(self) -> float | None:
+        if hasattr(os, 'getloadavg'):
+            return os.getloadavg()[0]
+        if self.win_load_tracker is not None:
+            return self.win_load_tracker.getloadavg()
+        return None
+
+    def display_progress(self, test_index: int, text: str) -> None:
+        if self._quiet:
+            return
+        results = self._results
+
+        # "[ 51/405/1] test_tcl passed"
+        line = f"{test_index:{self.test_count_width}}{self.test_count_text}"
+        fails = len(results.bad) + len(results.env_changed)
+        if fails and not self._pgo:
+            line = f"{line}/{fails}"
+        self.log(f"[{line}] {text}")
+
+    def set_tests(self, runtests: RunTests) -> None:
+        if runtests.forever:
+            self.test_count_text = ''
+            self.test_count_width = 3
+        else:
+            self.test_count_text = '/{}'.format(len(runtests.tests))
+            self.test_count_width = len(self.test_count_text) - 1
+
+    def start_load_tracker(self) -> None:
+        if not MS_WINDOWS:
+            return
+
+        try:
+            self.win_load_tracker = WindowsLoadTracker()
+        except PermissionError as error:
+            # Standard accounts may not have access to the performance
+            # counters.
+            print_warning(f'Failed to create WindowsLoadTracker: {error}')
+
+    def stop_load_tracker(self) -> None:
+        if self.win_load_tracker is None:
+            return
+        self.win_load_tracker.close()
+        self.win_load_tracker = None
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 56d562d0b068dda5cc030f9e18ae29e27e8d8a96..fe35df05c80e896567f049f9d2230bd292ed9b9e 100644 (file)
@@ -1,38 +1,30 @@
-import faulthandler
-import locale
 import os
-import platform
 import random
 import re
+import shlex
 import sys
 import sysconfig
-import tempfile
 import time
-import unittest
-from test.libregrtest.cmdline import _parse_args
-from test.libregrtest.runtest import (
-    findtests, split_test_packages, runtest, abs_module_name,
-    PROGRESS_MIN_TIME, State, MatchTestsDict, RunTests)
-from test.libregrtest.setup import setup_tests
-from test.libregrtest.pgo import setup_pgo_tests
-from test.libregrtest.utils import (strip_py_suffix, count, format_duration,
-                                    printlist, get_build_info)
-from test import support
-from test.support import TestStats
-from test.support import os_helper
-from test.support import threading_helper
-
-
-# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
-# Used to protect against threading._shutdown() hang.
-# Must be smaller than buildbot "1200 seconds without output" limit.
-EXIT_TIMEOUT = 120.0
 
-EXITCODE_BAD_TEST = 2
-EXITCODE_ENV_CHANGED = 3
-EXITCODE_NO_TESTS_RAN = 4
-EXITCODE_RERUN_FAIL = 5
-EXITCODE_INTERRUPTED = 130
+from test import support
+from test.support import os_helper, MS_WINDOWS
+
+from .cmdline import _parse_args, Namespace
+from .findtests import findtests, split_test_packages, list_cases
+from .logger import Logger
+from .pgo import setup_pgo_tests
+from .result import State
+from .results import TestResults, EXITCODE_INTERRUPTED
+from .runtests import RunTests, HuntRefleak
+from .setup import setup_process, setup_test_dir
+from .single import run_single_test, PROGRESS_MIN_TIME
+from .utils import (
+    StrPath, StrJSON, TestName, TestList, TestTuple, FilterTuple,
+    strip_py_suffix, count, format_duration,
+    printlist, get_temp_dir, get_work_dir, exit_timeout,
+    display_header, cleanup_temp_dir, print_warning,
+    is_cross_compiled, get_host_runner, process_cpu_count,
+    EXIT_TIMEOUT)
 
 
 class Regrtest:
@@ -58,306 +50,212 @@ class Regrtest:
     directly to set the values that would normally be set by flags
     on the command line.
     """
-    def __init__(self):
-        # Namespace of command line options
-        self.ns = None
-
-        # tests
-        self.tests = []
-        self.selected = []
-        self.all_runtests: list[RunTests] = []
-
-        # test results
-        self.good: list[str] = []
-        self.bad: list[str] = []
-        self.rerun_bad: list[str] = []
-        self.skipped: list[str] = []
-        self.resource_denied: list[str] = []
-        self.environment_changed: list[str] = []
-        self.run_no_tests: list[str] = []
-        self.rerun: list[str] = []
-
-        self.need_rerun: list[TestResult] = []
+    def __init__(self, ns: Namespace, _add_python_opts: bool = False):
+        # Log verbosity
+        self.verbose: int = int(ns.verbose)
+        self.quiet: bool = ns.quiet
+        self.pgo: bool = ns.pgo
+        self.pgo_extended: bool = ns.pgo_extended
+
+        # Test results
+        self.results: TestResults = TestResults()
         self.first_state: str | None = None
-        self.interrupted = False
-        self.total_stats = TestStats()
 
-        # used by --slow
-        self.test_times = []
+        # Logger
+        self.logger = Logger(self.results, self.quiet, self.pgo)
+
+        # Actions
+        self.want_header: bool = ns.header
+        self.want_list_tests: bool = ns.list_tests
+        self.want_list_cases: bool = ns.list_cases
+        self.want_wait: bool = ns.wait
+        self.want_cleanup: bool = ns.cleanup
+        self.want_rerun: bool = ns.rerun
+        self.want_run_leaks: bool = ns.runleaks
+
+        self.ci_mode: bool = (ns.fast_ci or ns.slow_ci)
+        self.want_add_python_opts: bool = (_add_python_opts
+                                           and ns._add_python_opts)
+
+        # Select tests
+        if ns.match_tests:
+            self.match_tests: FilterTuple | None = tuple(ns.match_tests)
+        else:
+            self.match_tests = None
+        if ns.ignore_tests:
+            self.ignore_tests: FilterTuple | None = tuple(ns.ignore_tests)
+        else:
+            self.ignore_tests = None
+        self.exclude: bool = ns.exclude
+        self.fromfile: StrPath | None = ns.fromfile
+        self.starting_test: TestName | None = ns.start
+        self.cmdline_args: TestList = ns.args
 
-        # used by --coverage, trace.Trace instance
-        self.tracer = None
+        # Workers
+        if ns.use_mp is None:
+            num_workers = 0  # run sequentially
+        elif ns.use_mp <= 0:
+            num_workers = -1  # use the number of CPUs
+        else:
+            num_workers = ns.use_mp
+        self.num_workers: int = num_workers
+        self.worker_json: StrJSON | None = ns.worker_json
+
+        # Options to run tests
+        self.fail_fast: bool = ns.failfast
+        self.fail_env_changed: bool = ns.fail_env_changed
+        self.fail_rerun: bool = ns.fail_rerun
+        self.forever: bool = ns.forever
+        self.output_on_failure: bool = ns.verbose3
+        self.timeout: float | None = ns.timeout
+        if ns.huntrleaks:
+            warmups, runs, filename = ns.huntrleaks
+            filename = os.path.abspath(filename)
+            self.hunt_refleak: HuntRefleak | None = HuntRefleak(warmups, runs, filename)
+        else:
+            self.hunt_refleak = None
+        self.test_dir: StrPath | None = ns.testdir
+        self.junit_filename: StrPath | None = ns.xmlpath
+        self.memory_limit: str | None = ns.memlimit
+        self.gc_threshold: int | None = ns.threshold
+        self.use_resources: tuple[str, ...] = tuple(ns.use_resources)
+        if ns.python:
+            self.python_cmd: tuple[str, ...] | None = tuple(ns.python)
+        else:
+            self.python_cmd = None
+        self.coverage: bool = ns.trace
+        self.coverage_dir: StrPath | None = ns.coverdir
+        self.tmp_dir: StrPath | None = ns.tempdir
+
+        # Randomize
+        self.randomize: bool = ns.randomize
+        self.random_seed: int | None =  (
+            ns.random_seed
+            if ns.random_seed is not None
+            else random.getrandbits(32)
+        )
+        if 'SOURCE_DATE_EPOCH' in os.environ:
+            self.randomize = False
+            self.random_seed = None
+
+        # tests
+        self.first_runtests: RunTests | None = None
+
+        # used by --slowest
+        self.print_slowest: bool = ns.print_slow
 
         # used to display the progress bar "[ 3/100]"
         self.start_time = time.perf_counter()
-        self.test_count_text = ''
-        self.test_count_width = 1
 
         # used by --single
-        self.next_single_test = None
-        self.next_single_filename = None
-
-        # used by --junit-xml
-        self.testsuite_xml = None
-
-        # misc
-        self.win_load_tracker = None
-        self.tmp_dir = None
-
-    def get_executed(self):
-        return (set(self.good) | set(self.bad) | set(self.skipped)
-                | set(self.resource_denied) | set(self.environment_changed)
-                | set(self.run_no_tests))
-
-    def accumulate_result(self, result, rerun=False):
-        fail_env_changed = self.ns.fail_env_changed
-        test_name = result.test_name
-
-        match result.state:
-            case State.PASSED:
-                self.good.append(test_name)
-            case State.ENV_CHANGED:
-                self.environment_changed.append(test_name)
-            case State.SKIPPED:
-                self.skipped.append(test_name)
-            case State.RESOURCE_DENIED:
-                self.resource_denied.append(test_name)
-            case State.INTERRUPTED:
-                self.interrupted = True
-            case State.DID_NOT_RUN:
-                self.run_no_tests.append(test_name)
-            case _:
-                if result.is_failed(fail_env_changed):
-                    self.bad.append(test_name)
-                    self.need_rerun.append(result)
-                else:
-                    raise ValueError(f"invalid test state: {result.state!r}")
-
-        if result.has_meaningful_duration() and not rerun:
-            self.test_times.append((result.duration, test_name))
-        if result.stats is not None:
-            self.total_stats.accumulate(result.stats)
-        if rerun:
-            self.rerun.append(test_name)
-
-        xml_data = result.xml_data
-        if xml_data:
-            import xml.etree.ElementTree as ET
-            for e in xml_data:
-                try:
-                    self.testsuite_xml.append(ET.fromstring(e))
-                except ET.ParseError:
-                    print(xml_data, file=sys.__stderr__)
-                    raise
+        self.single_test_run: bool = ns.single
+        self.next_single_test: TestName | None = None
+        self.next_single_filename: StrPath | None = None
 
     def log(self, line=''):
-        empty = not line
-
-        # add the system load prefix: "load avg: 1.80 "
-        load_avg = self.getloadavg()
-        if load_avg is not None:
-            line = f"load avg: {load_avg:.2f} {line}"
-
-        # add the timestamp prefix:  "0:01:05 "
-        test_time = time.perf_counter() - self.start_time
-
-        mins, secs = divmod(int(test_time), 60)
-        hours, mins = divmod(mins, 60)
-        test_time = "%d:%02d:%02d" % (hours, mins, secs)
+        self.logger.log(line)
 
-        line = f"{test_time} {line}"
-        if empty:
-            line = line[:-1]
-
-        print(line, flush=True)
-
-    def display_progress(self, test_index, text):
-        quiet = self.ns.quiet
-        pgo = self.ns.pgo
-        if quiet:
-            return
-
-        # "[ 51/405/1] test_tcl passed"
-        line = f"{test_index:{self.test_count_width}}{self.test_count_text}"
-        fails = len(self.bad) + len(self.environment_changed)
-        if fails and not pgo:
-            line = f"{line}/{fails}"
-        self.log(f"[{line}] {text}")
-
-    def parse_args(self, kwargs):
-        ns = _parse_args(sys.argv[1:], **kwargs)
-
-        if ns.xmlpath:
-            support.junit_xml_list = self.testsuite_xml = []
-
-        strip_py_suffix(ns.args)
-
-        if ns.huntrleaks:
-            warmup, repetitions, _ = ns.huntrleaks
-            if warmup < 1 or repetitions < 1:
-                msg = ("Invalid values for the --huntrleaks/-R parameters. The "
-                       "number of warmups and repetitions must be at least 1 "
-                       "each (1:1).")
-                print(msg, file=sys.stderr, flush=True)
-                sys.exit(2)
-
-        if ns.tempdir:
-            ns.tempdir = os.path.expanduser(ns.tempdir)
-
-        self.ns = ns
-
-    def find_tests(self, tests):
-        ns = self.ns
-        single = ns.single
-        fromfile = ns.fromfile
-        pgo = ns.pgo
-        exclude = ns.exclude
-        test_dir = ns.testdir
-        starting_test = ns.start
-        randomize = ns.randomize
-
-        self.tests = tests
-
-        if single:
+    def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]:
+        if self.single_test_run:
             self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
             try:
                 with open(self.next_single_filename, 'r') as fp:
                     next_test = fp.read().strip()
-                    self.tests = [next_test]
+                    tests = [next_test]
             except OSError:
                 pass
 
-        if fromfile:
-            self.tests = []
+        if self.fromfile:
+            tests = []
             # regex to match 'test_builtin' in line:
             # '0:00:00 [  4/400] test_builtin -- test_dict took 1 sec'
             regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
-            with open(os.path.join(os_helper.SAVEDCWD, fromfile)) as fp:
+            with open(os.path.join(os_helper.SAVEDCWD, self.fromfile)) as fp:
                 for line in fp:
                     line = line.split('#', 1)[0]
                     line = line.strip()
                     match = regex.search(line)
                     if match is not None:
-                        self.tests.append(match.group())
+                        tests.append(match.group())
 
-        strip_py_suffix(self.tests)
+        strip_py_suffix(tests)
 
-        if pgo:
+        if self.pgo:
             # add default PGO tests if no tests are specified
-            setup_pgo_tests(ns)
+            setup_pgo_tests(self.cmdline_args, self.pgo_extended)
 
         exclude_tests = set()
-        if exclude:
-            for arg in ns.args:
+        if self.exclude:
+            for arg in self.cmdline_args:
                 exclude_tests.add(arg)
-            ns.args = []
+            self.cmdline_args = []
 
-        alltests = findtests(testdir=test_dir, exclude=exclude_tests)
+        alltests = findtests(testdir=self.test_dir,
+                             exclude=exclude_tests)
 
-        if not fromfile:
-            self.selected = self.tests or ns.args
-            if self.selected:
-                self.selected = split_test_packages(self.selected)
+        if not self.fromfile:
+            selected = tests or self.cmdline_args
+            if selected:
+                selected = split_test_packages(selected)
             else:
-                self.selected = alltests
+                selected = alltests
         else:
-            self.selected = self.tests
+            selected = tests
 
-        if single:
-            self.selected = self.selected[:1]
+        if self.single_test_run:
+            selected = selected[:1]
             try:
-                pos = alltests.index(self.selected[0])
+                pos = alltests.index(selected[0])
                 self.next_single_test = alltests[pos + 1]
             except IndexError:
                 pass
 
         # Remove all the selected tests that precede start if it's set.
-        if starting_test:
+        if self.starting_test:
             try:
-                del self.selected[:self.selected.index(starting_test)]
+                del selected[:selected.index(self.starting_test)]
             except ValueError:
-                print(f"Cannot find starting test: {starting_test}")
+                print(f"Cannot find starting test: {self.starting_test}")
                 sys.exit(1)
 
-        if randomize:
-            if ns.random_seed is None:
-                ns.random_seed = random.randrange(10000000)
-            random.seed(ns.random_seed)
-            random.shuffle(self.selected)
+        random.seed(self.random_seed)
+        if self.randomize:
+            random.shuffle(selected)
 
-    def list_tests(self):
-        for name in self.selected:
+        return (tuple(selected), tests)
+
+    @staticmethod
+    def list_tests(tests: TestTuple):
+        for name in tests:
             print(name)
 
-    def _list_cases(self, suite):
-        for test in suite:
-            if isinstance(test, unittest.loader._FailedTest):
-                continue
-            if isinstance(test, unittest.TestSuite):
-                self._list_cases(test)
-            elif isinstance(test, unittest.TestCase):
-                if support.match_test(test):
-                    print(test.id())
-
-    def list_cases(self):
-        ns = self.ns
-        test_dir = ns.testdir
-        support.verbose = False
-        support.set_match_tests(ns.match_tests, ns.ignore_tests)
-
-        skipped = []
-        for test_name in self.selected:
-            module_name = abs_module_name(test_name, test_dir)
-            try:
-                suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
-                self._list_cases(suite)
-            except unittest.SkipTest:
-                skipped.append(test_name)
-
-        if skipped:
-            sys.stdout.flush()
-            stderr = sys.stderr
-            print(file=stderr)
-            print(count(len(skipped), "test"), "skipped:", file=stderr)
-            printlist(skipped, file=stderr)
-
-    def get_rerun_match(self, rerun_list) -> MatchTestsDict:
-        rerun_match_tests = {}
-        for result in rerun_list:
-            match_tests = result.get_rerun_match_tests()
-            # ignore empty match list
-            if match_tests:
-                rerun_match_tests[result.test_name] = match_tests
-        return rerun_match_tests
-
-    def _rerun_failed_tests(self, need_rerun):
+    def _rerun_failed_tests(self, runtests: RunTests):
         # Configure the runner to re-run tests
-        ns = self.ns
-        ns.verbose = True
-        ns.failfast = False
-        ns.verbose3 = False
-        ns.forever = False
-        if ns.use_mp is None:
-            ns.use_mp = 1
+        if self.num_workers == 0:
+            # Always run tests in fresh processes to have more deterministic
+            # initial state. Don't re-run tests in parallel, but limit to a
+            # single worker process, to limit side effects (on the system
+            # load and timings) between tests.
+            self.num_workers = 1
 
-        # Get tests to re-run
-        tests = [result.test_name for result in need_rerun]
-        match_tests = self.get_rerun_match(need_rerun)
-        self.set_tests(tests)
-
-        # Clear previously failed tests
-        self.rerun_bad.extend(self.bad)
-        self.bad.clear()
-        self.need_rerun.clear()
+        tests, match_tests_dict = self.results.prepare_rerun()
 
         # Re-run failed tests
         self.log(f"Re-running {len(tests)} failed tests in verbose mode in subprocesses")
-        runtests = RunTests(tests, match_tests=match_tests, rerun=True)
-        self.all_runtests.append(runtests)
-        self._run_tests_mp(runtests)
-
-    def rerun_failed_tests(self, need_rerun):
-        if self.ns.python:
+        runtests = runtests.copy(
+            tests=tests,
+            rerun=True,
+            verbose=True,
+            forever=False,
+            fail_fast=False,
+            match_tests_dict=match_tests_dict,
+            output_on_failure=False)
+        self.logger.set_tests(runtests)
+        self._run_tests_mp(runtests, self.num_workers)
+        return runtests
+
+    def rerun_failed_tests(self, runtests: RunTests):
+        if self.python_cmd:
             # Temp patch for https://github.com/python/cpython/issues/94052
             self.log(
                 "Re-running failed tests is not supported with --python "
@@ -365,126 +263,61 @@ class Regrtest:
             )
             return
 
-        self.first_state = self.get_tests_state()
+        self.first_state = self.get_state()
 
         print()
-        self._rerun_failed_tests(need_rerun)
+        rerun_runtests = self._rerun_failed_tests(runtests)
 
-        if self.bad:
-            print(count(len(self.bad), 'test'), "failed again:")
-            printlist(self.bad)
+        if self.results.bad:
+            print(count(len(self.results.bad), 'test'), "failed again:")
+            printlist(self.results.bad)
 
-        self.display_result()
-
-    def display_result(self):
-        pgo = self.ns.pgo
-        quiet = self.ns.quiet
-        print_slow = self.ns.print_slow
+        self.display_result(rerun_runtests)
 
+    def display_result(self, runtests):
         # If running the test suite for PGO then no one cares about results.
-        if pgo:
+        if runtests.pgo:
             return
 
+        state = self.get_state()
         print()
-        print("== Tests result: %s ==" % self.get_tests_state())
-
-        if self.interrupted:
-            print("Test suite interrupted by signal SIGINT.")
-
-        omitted = set(self.selected) - self.get_executed()
-        if omitted:
-            print()
-            print(count(len(omitted), "test"), "omitted:")
-            printlist(omitted)
-
-        if self.good and not quiet:
-            print()
-            if (not self.bad
-                and not self.skipped
-                and not self.interrupted
-                and len(self.good) > 1):
-                print("All", end=' ')
-            print(count(len(self.good), "test"), "OK.")
-
-        if print_slow:
-            self.test_times.sort(reverse=True)
-            print()
-            print("10 slowest tests:")
-            for test_time, test in self.test_times[:10]:
-                print("- %s: %s" % (test, format_duration(test_time)))
-
-        if self.bad:
-            print()
-            print(count(len(self.bad), "test"), "failed:")
-            printlist(self.bad)
-
-        if self.environment_changed:
-            print()
-            print("{} altered the execution environment:".format(
-                     count(len(self.environment_changed), "test")))
-            printlist(self.environment_changed)
-
-        if self.skipped and not quiet:
-            print()
-            print(count(len(self.skipped), "test"), "skipped:")
-            printlist(self.skipped)
-
-        if self.resource_denied and not quiet:
-            print()
-            print(count(len(self.resource_denied), "test"), "skipped (resource denied):")
-            printlist(self.resource_denied)
-
-        if self.rerun:
-            print()
-            print("%s:" % count(len(self.rerun), "re-run test"))
-            printlist(self.rerun)
-
-        if self.run_no_tests:
-            print()
-            print(count(len(self.run_no_tests), "test"), "run no tests:")
-            printlist(self.run_no_tests)
-
-    def run_test(self, test_index, test_name, previous_test, save_modules):
-        text = test_name
-        if previous_test:
-            text = '%s -- %s' % (text, previous_test)
-        self.display_progress(test_index, text)
+        print(f"== Tests result: {state} ==")
 
-        if self.tracer:
+        self.results.display_result(runtests.tests,
+                                    self.quiet, self.print_slowest)
+
+    def run_test(self, test_name: TestName, runtests: RunTests, tracer):
+        if tracer is not None:
             # If we're tracing code coverage, then we don't exit with status
             # if on a false return value from main.
-            cmd = ('result = runtest(self.ns, test_name); '
-                   'self.accumulate_result(result)')
-            ns = dict(locals())
-            self.tracer.runctx(cmd, globals=globals(), locals=ns)
-            result = ns['result']
+            cmd = ('result = run_single_test(test_name, runtests)')
+            namespace = dict(locals())
+            tracer.runctx(cmd, globals=globals(), locals=namespace)
+            result = namespace['result']
         else:
-            result = runtest(self.ns, test_name)
-            self.accumulate_result(result)
+            result = run_single_test(test_name, runtests)
 
-        # Unload the newly imported modules (best effort finalization)
-        for module in sys.modules.keys():
-            if module not in save_modules and module.startswith("test."):
-                support.unload(module)
+        self.results.accumulate_result(result, runtests)
 
         return result
 
     def run_tests_sequentially(self, runtests):
-        ns = self.ns
-        coverage = ns.trace
-        fail_fast = ns.failfast
-        fail_env_changed = ns.fail_env_changed
-        timeout = ns.timeout
-
-        if coverage:
+        if self.coverage:
             import trace
-            self.tracer = trace.Trace(trace=False, count=True)
+            tracer = trace.Trace(trace=False, count=True)
+        else:
+            tracer = None
 
         save_modules = sys.modules.keys()
 
-        msg = "Run tests sequentially"
-        if timeout:
-            msg += " (timeout: %s)" % format_duration(timeout)
+        jobs = runtests.get_jobs()
+        if jobs is not None:
+            tests = count(jobs, 'test')
+        else:
+            tests = 'tests'
+        msg = f"Run {tests} sequentially"
+        if runtests.timeout:
+            msg += " (timeout: %s)" % format_duration(runtests.timeout)
         self.log(msg)
 
         previous_test = None
@@ -492,10 +325,19 @@ class Regrtest:
         for test_index, test_name in enumerate(tests_iter, 1):
             start_time = time.perf_counter()
 
-            result = self.run_test(test_index, test_name,
-                                   previous_test, save_modules)
+            text = test_name
+            if previous_test:
+                text = '%s -- %s' % (text, previous_test)
+            self.logger.display_progress(test_index, text)
+
+            result = self.run_test(test_name, runtests, tracer)
 
-            if result.must_stop(fail_fast, fail_env_changed):
+            # Unload the newly imported modules (best effort finalization)
+            for module in sys.modules.keys():
+                if module not in save_modules and module.startswith("test."):
+                    support.unload(module)
+
+            if result.must_stop(self.fail_fast, self.fail_env_changed):
                 break
 
             previous_test = str(result)
@@ -509,129 +351,19 @@ class Regrtest:
         if previous_test:
             print(previous_test)
 
-    def display_header(self):
-        # Print basic platform information
-        print("==", platform.python_implementation(), *sys.version.split())
-        print("==", platform.platform(aliased=True),
-                      "%s-endian" % sys.byteorder)
-        print("== Python build:", ' '.join(get_build_info()))
-        print("== cwd:", os.getcwd())
-        cpu_count = os.cpu_count()
-        if cpu_count:
-            print("== CPU count:", cpu_count)
-        print("== encodings: locale=%s, FS=%s"
-              % (locale.getencoding(), sys.getfilesystemencoding()))
-        self.display_sanitizers()
-
-    def display_sanitizers(self):
-        # This makes it easier to remember what to set in your local
-        # environment when trying to reproduce a sanitizer failure.
-        asan = support.check_sanitizer(address=True)
-        msan = support.check_sanitizer(memory=True)
-        ubsan = support.check_sanitizer(ub=True)
-        sanitizers = []
-        if asan:
-            sanitizers.append("address")
-        if msan:
-            sanitizers.append("memory")
-        if ubsan:
-            sanitizers.append("undefined behavior")
-        if not sanitizers:
-            return
+        return tracer
 
-        print(f"== sanitizers: {', '.join(sanitizers)}")
-        for sanitizer, env_var in (
-            (asan, "ASAN_OPTIONS"),
-            (msan, "MSAN_OPTIONS"),
-            (ubsan, "UBSAN_OPTIONS"),
-        ):
-            options= os.environ.get(env_var)
-            if sanitizer and options is not None:
-                print(f"== {env_var}={options!r}")
-
-    def no_tests_run(self):
-        return not any((self.good, self.bad, self.skipped, self.interrupted,
-                        self.environment_changed))
-
-    def get_tests_state(self):
-        fail_env_changed = self.ns.fail_env_changed
-
-        result = []
-        if self.bad:
-            result.append("FAILURE")
-        elif fail_env_changed and self.environment_changed:
-            result.append("ENV CHANGED")
-        elif self.no_tests_run():
-            result.append("NO TESTS RAN")
-
-        if self.interrupted:
-            result.append("INTERRUPTED")
-
-        if not result:
-            result.append("SUCCESS")
-
-        result = ', '.join(result)
+    def get_state(self):
+        state = self.results.get_state(self.fail_env_changed)
         if self.first_state:
-            result = '%s then %s' % (self.first_state, result)
-        return result
-
-    def _run_tests_mp(self, runtests: RunTests) -> None:
-        from test.libregrtest.runtest_mp import run_tests_multiprocess
-        # If we're on windows and this is the parent runner (not a worker),
-        # track the load average.
-        if sys.platform == 'win32':
-            from test.libregrtest.win_utils import WindowsLoadTracker
+            state = f'{self.first_state} then {state}'
+        return state
 
-            try:
-                self.win_load_tracker = WindowsLoadTracker()
-            except PermissionError as error:
-                # Standard accounts may not have access to the performance
-                # counters.
-                print(f'Failed to create WindowsLoadTracker: {error}')
+    def _run_tests_mp(self, runtests: RunTests, num_workers: int) -> None:
+        from .run_workers import RunWorkers
+        RunWorkers(num_workers, runtests, self.logger, self.results).run()
 
-        try:
-            run_tests_multiprocess(self, runtests)
-        finally:
-            if self.win_load_tracker is not None:
-                self.win_load_tracker.close()
-                self.win_load_tracker = None
-
-    def set_tests(self, tests):
-        self.tests = tests
-        if self.ns.forever:
-            self.test_count_text = ''
-            self.test_count_width = 3
-        else:
-            self.test_count_text = '/{}'.format(len(self.tests))
-            self.test_count_width = len(self.test_count_text) - 1
-
-    def run_tests(self):
-        # For a partial run, we do not need to clutter the output.
-        if (self.ns.header
-            or not(self.ns.pgo or self.ns.quiet or self.ns.single
-                   or self.tests or self.ns.args)):
-            self.display_header()
-
-        if self.ns.huntrleaks:
-            warmup, repetitions, _ = self.ns.huntrleaks
-            if warmup < 3:
-                msg = ("WARNING: Running tests with --huntrleaks/-R and less than "
-                        "3 warmup repetitions can give false positives!")
-                print(msg, file=sys.stdout, flush=True)
-
-        if self.ns.randomize:
-            print("Using random seed", self.ns.random_seed)
-
-        tests = self.selected
-        self.set_tests(tests)
-        runtests = RunTests(tests, forever=self.ns.forever)
-        self.all_runtests.append(runtests)
-        if self.ns.use_mp:
-            self._run_tests_mp(runtests)
-        else:
-            self.run_tests_sequentially(runtests)
-
-    def finalize(self):
+    def finalize_tests(self, tracer):
         if self.next_single_filename:
             if self.next_single_test:
                 with open(self.next_single_filename, 'w') as fp:
@@ -639,255 +371,299 @@ class Regrtest:
             else:
                 os.unlink(self.next_single_filename)
 
-        if self.tracer:
-            r = self.tracer.results()
-            r.write_results(show_missing=True, summary=True,
-                            coverdir=self.ns.coverdir)
+        if tracer is not None:
+            results = tracer.results()
+            results.write_results(show_missing=True, summary=True,
+                                  coverdir=self.coverage_dir)
 
-        if self.ns.runleaks:
+        if self.want_run_leaks:
             os.system("leaks %d" % os.getpid())
 
-        self.save_xml_result()
+        if self.junit_filename:
+            self.results.write_junit(self.junit_filename)
 
     def display_summary(self):
-        duration = time.perf_counter() - self.start_time
-        first_runtests = self.all_runtests[0]
-        # the second runtests (re-run failed tests) disables forever,
-        # use the first runtests
-        forever = first_runtests.forever
-        filtered = bool(self.ns.match_tests) or bool(self.ns.ignore_tests)
+        duration = time.perf_counter() - self.logger.start_time
+        filtered = bool(self.match_tests) or bool(self.ignore_tests)
 
         # Total duration
         print()
         print("Total duration: %s" % format_duration(duration))
 
-        # Total tests
-        total = self.total_stats
-        text = f'run={total.tests_run:,}'
-        if filtered:
-            text = f"{text} (filtered)"
-        stats = [text]
-        if total.failures:
-            stats.append(f'failures={total.failures:,}')
-        if total.skipped:
-            stats.append(f'skipped={total.skipped:,}')
-        print(f"Total tests: {' '.join(stats)}")
-
-        # Total test files
-        all_tests = [self.good, self.bad, self.rerun,
-                     self.skipped,
-                     self.environment_changed, self.run_no_tests]
-        run = sum(map(len, all_tests))
-        text = f'run={run}'
-        if not forever:
-            ntest = len(first_runtests.tests)
-            text = f"{text}/{ntest}"
-        if filtered:
-            text = f"{text} (filtered)"
-        report = [text]
-        for name, tests in (
-            ('failed', self.bad),
-            ('env_changed', self.environment_changed),
-            ('skipped', self.skipped),
-            ('resource_denied', self.resource_denied),
-            ('rerun', self.rerun),
-            ('run_no_tests', self.run_no_tests),
-        ):
-            if tests:
-                report.append(f'{name}={len(tests)}')
-        print(f"Total test files: {' '.join(report)}")
+        self.results.display_summary(self.first_runtests, filtered)
 
         # Result
-        result = self.get_tests_state()
-        print(f"Result: {result}")
+        state = self.get_state()
+        print(f"Result: {state}")
+
+    def create_run_tests(self, tests: TestTuple):
+        return RunTests(
+            tests,
+            fail_fast=self.fail_fast,
+            fail_env_changed=self.fail_env_changed,
+            match_tests=self.match_tests,
+            ignore_tests=self.ignore_tests,
+            match_tests_dict=None,
+            rerun=False,
+            forever=self.forever,
+            pgo=self.pgo,
+            pgo_extended=self.pgo_extended,
+            output_on_failure=self.output_on_failure,
+            timeout=self.timeout,
+            verbose=self.verbose,
+            quiet=self.quiet,
+            hunt_refleak=self.hunt_refleak,
+            test_dir=self.test_dir,
+            use_junit=(self.junit_filename is not None),
+            memory_limit=self.memory_limit,
+            gc_threshold=self.gc_threshold,
+            use_resources=self.use_resources,
+            python_cmd=self.python_cmd,
+            randomize=self.randomize,
+            random_seed=self.random_seed,
+            json_file=None,
+        )
+
+    def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+        if self.hunt_refleak and self.hunt_refleak.warmups < 3:
+            msg = ("WARNING: Running tests with --huntrleaks/-R and "
+                   "less than 3 warmup repetitions can give false positives!")
+            print(msg, file=sys.stdout, flush=True)
+
+        if self.num_workers < 0:
+            # Use all CPUs + 2 extra worker processes for tests
+            # that like to sleep
+            self.num_workers = (process_cpu_count() or 1) + 2
 
-    def save_xml_result(self):
-        if not self.ns.xmlpath and not self.testsuite_xml:
-            return
-
-        import xml.etree.ElementTree as ET
-        root = ET.Element("testsuites")
-
-        # Manually count the totals for the overall summary
-        totals = {'tests': 0, 'errors': 0, 'failures': 0}
-        for suite in self.testsuite_xml:
-            root.append(suite)
-            for k in totals:
-                try:
-                    totals[k] += int(suite.get(k, 0))
-                except ValueError:
-                    pass
-
-        for k, v in totals.items():
-            root.set(k, str(v))
-
-        xmlpath = os.path.join(os_helper.SAVEDCWD, self.ns.xmlpath)
-        with open(xmlpath, 'wb') as f:
-            for s in ET.tostringlist(root):
-                f.write(s)
-
-    def fix_umask(self):
-        if support.is_emscripten:
-            # Emscripten has default umask 0o777, which breaks some tests.
-            # see https://github.com/emscripten-core/emscripten/issues/17269
-            old_mask = os.umask(0)
-            if old_mask == 0o777:
-                os.umask(0o027)
-            else:
-                os.umask(old_mask)
-
-    def set_temp_dir(self):
-        if self.ns.tempdir:
-            self.tmp_dir = self.ns.tempdir
-
-        if not self.tmp_dir:
-            # When tests are run from the Python build directory, it is best practice
-            # to keep the test files in a subfolder.  This eases the cleanup of leftover
-            # files using the "make distclean" command.
-            if sysconfig.is_python_build():
-                self.tmp_dir = sysconfig.get_config_var('abs_builddir')
-                if self.tmp_dir is None:
-                    self.tmp_dir = sysconfig.get_config_var('abs_srcdir')
-                    if not self.tmp_dir:
-                        # gh-74470: On Windows, only srcdir is available. Using
-                        # abs_builddir mostly matters on UNIX when building
-                        # Python out of the source tree, especially when the
-                        # source tree is read only.
-                        self.tmp_dir = sysconfig.get_config_var('srcdir')
-                self.tmp_dir = os.path.join(self.tmp_dir, 'build')
-            else:
-                self.tmp_dir = tempfile.gettempdir()
+        # For a partial run, we do not need to clutter the output.
+        if (self.want_header
+            or not(self.pgo or self.quiet or self.single_test_run
+                   or tests or self.cmdline_args)):
+            display_header(self.use_resources, self.python_cmd)
 
-        self.tmp_dir = os.path.abspath(self.tmp_dir)
+        print("Using random seed", self.random_seed)
 
-    def is_worker(self):
-        return (self.ns.worker_args is not None)
+        runtests = self.create_run_tests(selected)
+        self.first_runtests = runtests
+        self.logger.set_tests(runtests)
 
-    def create_temp_dir(self):
-        os.makedirs(self.tmp_dir, exist_ok=True)
+        setup_process()
 
-        # Define a writable temp dir that will be used as cwd while running
-        # the tests. The name of the dir includes the pid to allow parallel
-        # testing (see the -j option).
-        # Emscripten and WASI have stubbed getpid(), Emscripten has only
-        # milisecond clock resolution. Use randint() instead.
-        if sys.platform in {"emscripten", "wasi"}:
-            nounce = random.randint(0, 1_000_000)
+        if self.hunt_refleak and not self.num_workers:
+            # gh-109739: WindowsLoadTracker thread interferes with refleak check
+            use_load_tracker = False
         else:
-            nounce = os.getpid()
+            # WindowsLoadTracker is only needed on Windows
+            use_load_tracker = MS_WINDOWS
 
-        if self.is_worker():
-            test_cwd = 'test_python_worker_{}'.format(nounce)
-        else:
-            test_cwd = 'test_python_{}'.format(nounce)
-        test_cwd += os_helper.FS_NONASCII
-        test_cwd = os.path.join(self.tmp_dir, test_cwd)
-        return test_cwd
-
-    def cleanup(self):
-        import glob
-
-        path = os.path.join(glob.escape(self.tmp_dir), 'test_python_*')
-        print("Cleanup %s directory" % self.tmp_dir)
-        for name in glob.glob(path):
-            if os.path.isdir(name):
-                print("Remove directory: %s" % name)
-                os_helper.rmtree(name)
+        if use_load_tracker:
+            self.logger.start_load_tracker()
+        try:
+            if self.num_workers:
+                self._run_tests_mp(runtests, self.num_workers)
+                tracer = None
             else:
-                print("Remove file: %s" % name)
-                os_helper.unlink(name)
+                tracer = self.run_tests_sequentially(runtests)
 
-    def main(self, tests=None, **kwargs):
-        self.parse_args(kwargs)
+            self.display_result(runtests)
 
-        self.set_temp_dir()
+            if self.want_rerun and self.results.need_rerun():
+                self.rerun_failed_tests(runtests)
+        finally:
+            if use_load_tracker:
+                self.logger.stop_load_tracker()
 
-        self.fix_umask()
+        self.display_summary()
+        self.finalize_tests(tracer)
 
-        if self.ns.cleanup:
-            self.cleanup()
-            sys.exit(0)
+        return self.results.get_exitcode(self.fail_env_changed,
+                                         self.fail_rerun)
 
-        test_cwd = self.create_temp_dir()
+    def run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+        os.makedirs(self.tmp_dir, exist_ok=True)
+        work_dir = get_work_dir(self.tmp_dir)
 
-        try:
-            # Run the tests in a context manager that temporarily changes the CWD
-            # to a temporary and writable directory. If it's not possible to
-            # create or change the CWD, the original CWD will be used.
+        # Put a timeout on Python exit
+        with exit_timeout():
+            # Run the tests in a context manager that temporarily changes the
+            # CWD to a temporary and writable directory. If it's not possible
+            # to create or change the CWD, the original CWD will be used.
             # The original CWD is available from os_helper.SAVEDCWD.
-            with os_helper.temp_cwd(test_cwd, quiet=True):
-                # When using multiprocessing, worker processes will use test_cwd
-                # as their parent temporary directory. So when the main process
-                # exit, it removes also subdirectories of worker processes.
-                self.ns.tempdir = test_cwd
+            with os_helper.temp_cwd(work_dir, quiet=True):
+                # When using multiprocessing, worker processes will use
+                # work_dir as their parent temporary directory. So when the
+                # main process exits, it also removes the subdirectories of
+                # worker processes.
+                return self._run_tests(selected, tests)
+
+    def _add_cross_compile_opts(self, regrtest_opts):
+        # WASM/WASI buildbot builders pass multiple PYTHON environment
+        # variables such as PYTHONPATH and _PYTHON_HOSTRUNNER.
+        keep_environ = bool(self.python_cmd)
+        environ = None
+
+        # Are we using cross-compilation?
+        cross_compile = is_cross_compiled()
+
+        # Get HOSTRUNNER
+        hostrunner = get_host_runner()
+
+        if cross_compile:
+            # emulate -E, but keep PYTHONPATH + cross compile env vars,
+            # so the test executable can load the correct sysconfigdata file.
+            keep = {
+                '_PYTHON_PROJECT_BASE',
+                '_PYTHON_HOST_PLATFORM',
+                '_PYTHON_SYSCONFIGDATA_NAME',
+                'PYTHONPATH'
+            }
+            old_environ = os.environ
+            new_environ = {
+                name: value for name, value in os.environ.items()
+                if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
+            }
+            # Only set environ if at least one variable was removed
+            if new_environ != old_environ:
+                environ = new_environ
+            keep_environ = True
+
+        if cross_compile and hostrunner:
+            if self.num_workers == 0:
+                # For now use only two cores for cross-compiled builds;
+                # hostrunner can be expensive.
+                regrtest_opts.extend(['-j', '2'])
+
+            # If HOSTRUNNER is set and -p/--python option is not given, then
+            # use hostrunner to execute python binary for tests.
+            if not self.python_cmd:
+                buildpython = sysconfig.get_config_var("BUILDPYTHON")
+                python_cmd = f"{hostrunner} {buildpython}"
+                regrtest_opts.extend(["--python", python_cmd])
+                keep_environ = True
+
+        return (environ, keep_environ)
+
+    def _add_ci_python_opts(self, python_opts, keep_environ):
+        # --fast-ci and --slow-ci add options to Python:
+        # "-u -W default -bb -E"
+
+        # Unbuffered stdout and stderr
+        if not sys.stdout.write_through:
+            python_opts.append('-u')
+
+        # Add warnings filter 'default'
+        if 'default' not in sys.warnoptions:
+            python_opts.extend(('-W', 'default'))
+
+        # Error on bytes/str comparison
+        if sys.flags.bytes_warning < 2:
+            python_opts.append('-bb')
+
+        if not keep_environ:
+            # Ignore PYTHON* environment variables
+            if not sys.flags.ignore_environment:
+                python_opts.append('-E')
+
+    def _execute_python(self, cmd, environ):
+        # Make sure that messages before execv() are logged
+        sys.stdout.flush()
+        sys.stderr.flush()
+
+        cmd_text = shlex.join(cmd)
+        try:
+            print(f"+ {cmd_text}", flush=True)
 
-                self._main(tests, kwargs)
-        except SystemExit as exc:
-            # bpo-38203: Python can hang at exit in Py_Finalize(), especially
-            # on threading._shutdown() call: put a timeout
-            if threading_helper.can_start_thread:
-                faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)
+            if hasattr(os, 'execv') and not MS_WINDOWS:
+                os.execv(cmd[0], cmd)
+                # On success, execv() does not return.
+                # On error, it raises an OSError.
+            else:
+                import subprocess
+                with subprocess.Popen(cmd, env=environ) as proc:
+                    try:
+                        proc.wait()
+                    except KeyboardInterrupt:
+                        # There is no need to call proc.terminate(): on CTRL+C,
+                        # SIGTERM is also sent to the child process.
+                        try:
+                            proc.wait(timeout=EXIT_TIMEOUT)
+                        except subprocess.TimeoutExpired:
+                            proc.kill()
+                            proc.wait()
+                            sys.exit(EXITCODE_INTERRUPTED)
+
+                sys.exit(proc.returncode)
+        except Exception as exc:
+            print_warning(f"Failed to change Python options: {exc!r}\n"
+                          f"Command: {cmd_text}")
+            # continue executing main()
+
+    def _add_python_opts(self):
+        python_opts = []
+        regrtest_opts = []
+
+        environ, keep_environ = self._add_cross_compile_opts(regrtest_opts)
+        if self.ci_mode:
+            self._add_ci_python_opts(python_opts, keep_environ)
+
+        if (not python_opts) and (not regrtest_opts) and (environ is None):
+            # Nothing changed: nothing to do
+            return
 
-            sys.exit(exc.code)
+        # Create new command line
+        cmd = list(sys.orig_argv)
+        if python_opts:
+            cmd[1:1] = python_opts
+        if regrtest_opts:
+            cmd.extend(regrtest_opts)
+        cmd.append("--dont-add-python-opts")
 
-    def getloadavg(self):
-        if self.win_load_tracker is not None:
-            return self.win_load_tracker.getloadavg()
+        self._execute_python(cmd, environ)
 
-        if hasattr(os, 'getloadavg'):
-            return os.getloadavg()[0]
+    def _init(self):
+        # Set sys.stdout encoder error handler to backslashreplace,
+        # similar to sys.stderr error handler, to avoid UnicodeEncodeError
+        # when printing a traceback or any other non-encodable character.
+        sys.stdout.reconfigure(errors="backslashreplace")
 
-        return None
+        if self.junit_filename and not os.path.isabs(self.junit_filename):
+            self.junit_filename = os.path.abspath(self.junit_filename)
 
-    def get_exitcode(self):
-        exitcode = 0
-        if self.bad:
-            exitcode = EXITCODE_BAD_TEST
-        elif self.interrupted:
-            exitcode = EXITCODE_INTERRUPTED
-        elif self.ns.fail_env_changed and self.environment_changed:
-            exitcode = EXITCODE_ENV_CHANGED
-        elif self.no_tests_run():
-            exitcode = EXITCODE_NO_TESTS_RAN
-        elif self.rerun and self.ns.fail_rerun:
-            exitcode = EXITCODE_RERUN_FAIL
-        return exitcode
-
-    def action_run_tests(self):
-        self.run_tests()
-        self.display_result()
-
-        need_rerun = self.need_rerun
-        if self.ns.rerun and need_rerun:
-            self.rerun_failed_tests(need_rerun)
+        strip_py_suffix(self.cmdline_args)
 
-        self.display_summary()
-        self.finalize()
+        self.tmp_dir = get_temp_dir(self.tmp_dir)
 
-    def _main(self, tests, kwargs):
-        if self.is_worker():
-            from test.libregrtest.runtest_mp import run_tests_worker
-            run_tests_worker(self.ns.worker_args)
-            return
+    def main(self, tests: TestList | None = None):
+        if self.want_add_python_opts:
+            self._add_python_opts()
+
+        self._init()
+
+        if self.want_cleanup:
+            cleanup_temp_dir(self.tmp_dir)
+            sys.exit(0)
 
-        if self.ns.wait:
+        if self.want_wait:
             input("Press any key to continue...")
 
-        setup_tests(self.ns)
-        self.find_tests(tests)
+        setup_test_dir(self.test_dir)
+        selected, tests = self.find_tests(tests)
 
         exitcode = 0
-        if self.ns.list_tests:
-            self.list_tests()
-        elif self.ns.list_cases:
-            self.list_cases()
+        if self.want_list_tests:
+            self.list_tests(selected)
+        elif self.want_list_cases:
+            list_cases(selected,
+                       match_tests=self.match_tests,
+                       ignore_tests=self.ignore_tests,
+                       test_dir=self.test_dir)
         else:
-            self.action_run_tests()
-            exitcode = self.get_exitcode()
+            exitcode = self.run_tests(selected, tests)
 
         sys.exit(exitcode)
 
 
-def main(tests=None, **kwargs):
+def main(tests=None, _add_python_opts=False, **kwargs):
     """Run the Python suite."""
-    Regrtest().main(tests=tests, **kwargs)
+    ns = _parse_args(sys.argv[1:], **kwargs)
+    Regrtest(ns, _add_python_opts=_add_python_opts).main(tests=tests)
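
The re-exec path above rebuilds the command line from sys.orig_argv, splicing interpreter options right after the executable and appending --dont-add-python-opts so the child does not re-exec again. A minimal standalone sketch of that splicing; the option lists below are illustrative stand-ins, not the real regrtest configuration:

    import sys

    # Hypothetical option lists standing in for what _add_python_opts() computes.
    python_opts = ['-u', '-W', 'default', '-bb', '-E']
    regrtest_opts = ['-j', '2']

    cmd = list(sys.orig_argv)             # e.g. ['python3', '-m', 'test', '-v']
    cmd[1:1] = python_opts                # interpreter options go right after argv[0]
    cmd.extend(regrtest_opts)             # regrtest options go at the end
    cmd.append("--dont-add-python-opts")  # prevent the child from re-execing again
    print(cmd)
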
index 42ce5fba7a97c3a8e83c511222df57575fbc4a77..e3a6927be5db1d18e2e45d007fb878aad32d7565 100644 (file)
@@ -42,15 +42,15 @@ PGO_TESTS = [
     'test_set',
     'test_sqlite3',
     'test_statistics',
+    'test_str',
     'test_struct',
     'test_tabnanny',
     'test_time',
-    'test_unicode',
     'test_xml_etree',
     'test_xml_etree_c',
 ]
 
-def setup_pgo_tests(ns):
-    if not ns.args and not ns.pgo_extended:
+def setup_pgo_tests(cmdline_args, pgo_extended: bool):
+    if not cmdline_args and not pgo_extended:
         # run default set of tests for PGO training
-        ns.args = PGO_TESTS[:]
+        cmdline_args[:] = PGO_TESTS[:]
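
setup_pgo_tests() now fills the caller's argument list in place instead of assigning to ns.args. A small sketch of that slice-assignment idiom, using a hypothetical helper and an abbreviated stand-in for PGO_TESTS:

    # Simplified stand-in illustrating the in-place contract: the caller's
    # list object is mutated, so no return value is needed.
    DEFAULT_PGO_TESTS = ['test_set', 'test_struct', 'test_time']

    def fill_pgo_defaults(cmdline_args: list[str], pgo_extended: bool) -> None:
        if not cmdline_args and not pgo_extended:
            cmdline_args[:] = DEFAULT_PGO_TESTS[:]

    args: list[str] = []
    fill_pgo_defaults(args, pgo_extended=False)
    print(args)   # ['test_set', 'test_struct', 'test_time']
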
index 206802b60ddcd0ee814708d9dae7588bdfc2eb7a..ada1a65b867ee65d9b8fb7d62fd9be9040a3bcc7 100644 (file)
@@ -1,10 +1,13 @@
-import os
 import sys
 import warnings
 from inspect import isabstract
+from typing import Any
+
 from test import support
 from test.support import os_helper
-from test.libregrtest.utils import clear_caches
+
+from .runtests import HuntRefleak
+from .utils import clear_caches
 
 try:
     from _abc import _get_dump
@@ -19,7 +22,9 @@ except ImportError:
                 cls._abc_negative_cache, cls._abc_negative_cache_version)
 
 
-def dash_R(ns, test_name, test_func):
+def runtest_refleak(test_name, test_func,
+                    hunt_refleak: HuntRefleak,
+                    quiet: bool):
     """Run a test multiple times, looking for reference leaks.
 
     Returns:
@@ -41,6 +46,7 @@ def dash_R(ns, test_name, test_func):
     fs = warnings.filters[:]
     ps = copyreg.dispatch_table.copy()
     pic = sys.path_importer_cache.copy()
+    zdc: dict[str, Any] | None
     try:
         import zipimport
     except ImportError:
@@ -62,9 +68,10 @@ def dash_R(ns, test_name, test_func):
     def get_pooled_int(value):
         return int_pool.setdefault(value, value)
 
-    nwarmup, ntracked, fname = ns.huntrleaks
-    fname = os.path.join(os_helper.SAVEDCWD, fname)
-    repcount = nwarmup + ntracked
+    warmups = hunt_refleak.warmups
+    runs = hunt_refleak.runs
+    filename = hunt_refleak.filename
+    repcount = warmups + runs
 
     # Pre-allocate to ensure that the loop doesn't allocate anything new
     rep_range = list(range(repcount))
@@ -78,7 +85,7 @@ def dash_R(ns, test_name, test_func):
     # initialize variables to make pyflakes quiet
     rc_before = alloc_before = fd_before = interned_before = 0
 
-    if not ns.quiet:
+    if not quiet:
         print("beginning", repcount, "repetitions", file=sys.stderr)
         print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
               flush=True)
@@ -102,7 +109,7 @@ def dash_R(ns, test_name, test_func):
         rc_after = gettotalrefcount() - interned_after * 2
         fd_after = fd_count()
 
-        if not ns.quiet:
+        if not quiet:
             print('.', end='', file=sys.stderr, flush=True)
 
         rc_deltas[i] = get_pooled_int(rc_after - rc_before)
@@ -114,7 +121,7 @@ def dash_R(ns, test_name, test_func):
         fd_before = fd_after
         interned_before = interned_after
 
-    if not ns.quiet:
+    if not quiet:
         print(file=sys.stderr)
 
     # These checkers return False on success, True on failure
@@ -143,12 +150,12 @@ def dash_R(ns, test_name, test_func):
         (fd_deltas, 'file descriptors', check_fd_deltas)
     ]:
         # ignore warmup runs
-        deltas = deltas[nwarmup:]
+        deltas = deltas[warmups:]
         if checker(deltas):
             msg = '%s leaked %s %s, sum=%s' % (
                 test_name, deltas, item_name, sum(deltas))
             print(msg, file=sys.stderr, flush=True)
-            with open(fname, "a", encoding="utf-8") as refrep:
+            with open(filename, "a", encoding="utf-8") as refrep:
                 print(msg, file=refrep)
                 refrep.flush()
             failed = True
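
The hunt-refleak settings that used to travel as the ns.huntrleaks tuple are now carried by the HuntRefleak dataclass defined in runtests.py. A rough sketch of the equivalence; the class is re-declared here only for illustration:

    import dataclasses

    @dataclasses.dataclass(slots=True, frozen=True)
    class HuntRefleak:          # mirror of the real dataclass, for illustration only
        warmups: int
        runs: int
        filename: str

    hunt = HuntRefleak(warmups=5, runs=4, filename="reflog.txt")
    repcount = hunt.warmups + hunt.runs   # what the old code computed from the tuple
    print(repcount)                       # 9
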
diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py
new file mode 100644 (file)
index 0000000..d6b0d5a
--- /dev/null
@@ -0,0 +1,190 @@
+import dataclasses
+import json
+from typing import Any
+
+from test.support import TestStats
+
+from .utils import (
+    StrJSON, TestName, FilterTuple,
+    format_duration, normalize_test_name, print_warning)
+
+
+# Avoid enum.Enum to reduce the number of imports when tests are run
+class State:
+    PASSED = "PASSED"
+    FAILED = "FAILED"
+    SKIPPED = "SKIPPED"
+    UNCAUGHT_EXC = "UNCAUGHT_EXC"
+    REFLEAK = "REFLEAK"
+    ENV_CHANGED = "ENV_CHANGED"
+    RESOURCE_DENIED = "RESOURCE_DENIED"
+    INTERRUPTED = "INTERRUPTED"
+    WORKER_FAILED = "WORKER_FAILED"   # non-zero worker process exit code
+    WORKER_BUG = "WORKER_BUG"         # exception when running a worker
+    DID_NOT_RUN = "DID_NOT_RUN"
+    TIMEOUT = "TIMEOUT"
+
+    @staticmethod
+    def is_failed(state):
+        return state in {
+            State.FAILED,
+            State.UNCAUGHT_EXC,
+            State.REFLEAK,
+            State.WORKER_FAILED,
+            State.WORKER_BUG,
+            State.TIMEOUT}
+
+    @staticmethod
+    def has_meaningful_duration(state):
+        # Consider that the duration is meaningless for these cases.
+        # For example, if a whole test file is skipped, its duration
+        # is unlikely to be the duration of executing its tests,
+        # but just the duration to execute code which skips the test.
+        return state not in {
+            State.SKIPPED,
+            State.RESOURCE_DENIED,
+            State.INTERRUPTED,
+            State.WORKER_FAILED,
+            State.WORKER_BUG,
+            State.DID_NOT_RUN}
+
+    @staticmethod
+    def must_stop(state):
+        return state in {
+            State.INTERRUPTED,
+            State.WORKER_BUG,
+        }
+
+
+@dataclasses.dataclass(slots=True)
+class TestResult:
+    test_name: TestName
+    state: str | None = None
+    # Test duration in seconds
+    duration: float | None = None
+    xml_data: list[str] | None = None
+    stats: TestStats | None = None
+
+    # errors and failures copied from support.TestFailedWithDetails
+    errors: list[tuple[str, str]] | None = None
+    failures: list[tuple[str, str]] | None = None
+
+    def is_failed(self, fail_env_changed: bool) -> bool:
+        if self.state == State.ENV_CHANGED:
+            return fail_env_changed
+        return State.is_failed(self.state)
+
+    def _format_failed(self):
+        if self.errors and self.failures:
+            le = len(self.errors)
+            lf = len(self.failures)
+            error_s = "error" + ("s" if le > 1 else "")
+            failure_s = "failure" + ("s" if lf > 1 else "")
+            return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
+
+        if self.errors:
+            le = len(self.errors)
+            error_s = "error" + ("s" if le > 1 else "")
+            return f"{self.test_name} failed ({le} {error_s})"
+
+        if self.failures:
+            lf = len(self.failures)
+            failure_s = "failure" + ("s" if lf > 1 else "")
+            return f"{self.test_name} failed ({lf} {failure_s})"
+
+        return f"{self.test_name} failed"
+
+    def __str__(self) -> str:
+        match self.state:
+            case State.PASSED:
+                return f"{self.test_name} passed"
+            case State.FAILED:
+                return self._format_failed()
+            case State.SKIPPED:
+                return f"{self.test_name} skipped"
+            case State.UNCAUGHT_EXC:
+                return f"{self.test_name} failed (uncaught exception)"
+            case State.REFLEAK:
+                return f"{self.test_name} failed (reference leak)"
+            case State.ENV_CHANGED:
+                return f"{self.test_name} failed (env changed)"
+            case State.RESOURCE_DENIED:
+                return f"{self.test_name} skipped (resource denied)"
+            case State.INTERRUPTED:
+                return f"{self.test_name} interrupted"
+            case State.WORKER_FAILED:
+                return f"{self.test_name} worker non-zero exit code"
+            case State.WORKER_BUG:
+                return f"{self.test_name} worker bug"
+            case State.DID_NOT_RUN:
+                return f"{self.test_name} ran no tests"
+            case State.TIMEOUT:
+                return f"{self.test_name} timed out ({format_duration(self.duration)})"
+            case _:
+                raise ValueError(f"unknown result state: {self.state!r}")
+
+    def has_meaningful_duration(self):
+        return State.has_meaningful_duration(self.state)
+
+    def set_env_changed(self):
+        if self.state is None or self.state == State.PASSED:
+            self.state = State.ENV_CHANGED
+
+    def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
+        if State.must_stop(self.state):
+            return True
+        if fail_fast and self.is_failed(fail_env_changed):
+            return True
+        return False
+
+    def get_rerun_match_tests(self) -> FilterTuple | None:
+        match_tests = []
+
+        errors = self.errors or []
+        failures = self.failures or []
+        for error_list, is_error in (
+            (errors, True),
+            (failures, False),
+        ):
+            for full_name, *_ in error_list:
+                match_name = normalize_test_name(full_name, is_error=is_error)
+                if match_name is None:
+                    # 'setUpModule (test.test_sys)': don't filter tests
+                    return None
+                if not match_name:
+                    error_type = "ERROR" if is_error else "FAIL"
+                    print_warning(f"rerun failed to parse {error_type} test name: "
+                                  f"{full_name!r}: don't filter tests")
+                    return None
+                match_tests.append(match_name)
+
+        if not match_tests:
+            return None
+        return tuple(match_tests)
+
+    def write_json_into(self, file) -> None:
+        json.dump(self, file, cls=_EncodeTestResult)
+
+    @staticmethod
+    def from_json(worker_json: StrJSON) -> 'TestResult':
+        return json.loads(worker_json, object_hook=_decode_test_result)
+
+
+class _EncodeTestResult(json.JSONEncoder):
+    def default(self, o: Any) -> dict[str, Any]:
+        if isinstance(o, TestResult):
+            result = dataclasses.asdict(o)
+            result["__test_result__"] = o.__class__.__name__
+            return result
+        else:
+            return super().default(o)
+
+
+def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]:
+    if "__test_result__" in data:
+        data.pop('__test_result__')
+        if data['stats'] is not None:
+            data['stats'] = TestStats(**data['stats'])
+        return TestResult(**data)
+    else:
+        return data
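
The JSON helpers above tag serialized results with a __test_result__ marker so the main process can rebuild TestResult objects sent by workers. A round-trip sketch, assuming an interpreter whose Lib/test/libregrtest already contains this synced code:

    import io
    from test.libregrtest.result import State, TestResult

    result = TestResult("test_os", state=State.PASSED, duration=1.5)
    buf = io.StringIO()
    result.write_json_into(buf)                # encoded with the __test_result__ marker

    decoded = TestResult.from_json(buf.getvalue())
    print(decoded.test_name, decoded.state)    # test_os PASSED
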
diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py
new file mode 100644 (file)
index 0000000..3708078
--- /dev/null
@@ -0,0 +1,261 @@
+import sys
+from test.support import TestStats
+
+from .runtests import RunTests
+from .result import State, TestResult
+from .utils import (
+    StrPath, TestName, TestTuple, TestList, FilterDict,
+    printlist, count, format_duration)
+
+
+# Python uses exit code 1 when an exception is not caught
+# argparse.ArgumentParser.error() uses exit code 2
+EXITCODE_BAD_TEST = 2
+EXITCODE_ENV_CHANGED = 3
+EXITCODE_NO_TESTS_RAN = 4
+EXITCODE_RERUN_FAIL = 5
+EXITCODE_INTERRUPTED = 130   # 128 + signal.SIGINT=2
+
+
+class TestResults:
+    def __init__(self):
+        self.bad: TestList = []
+        self.good: TestList = []
+        self.rerun_bad: TestList = []
+        self.skipped: TestList = []
+        self.resource_denied: TestList = []
+        self.env_changed: TestList = []
+        self.run_no_tests: TestList = []
+        self.rerun: TestList = []
+        self.rerun_results: list[TestResult] = []
+
+        self.interrupted: bool = False
+        self.worker_bug: bool = False
+        self.test_times: list[tuple[float, TestName]] = []
+        self.stats = TestStats()
+        # used by --junit-xml
+        self.testsuite_xml: list[str] = []
+
+    def is_all_good(self):
+        return (not self.bad
+                and not self.skipped
+                and not self.interrupted
+                and not self.worker_bug)
+
+    def get_executed(self):
+        return (set(self.good) | set(self.bad) | set(self.skipped)
+                | set(self.resource_denied) | set(self.env_changed)
+                | set(self.run_no_tests))
+
+    def no_tests_run(self):
+        return not any((self.good, self.bad, self.skipped, self.interrupted,
+                        self.env_changed))
+
+    def get_state(self, fail_env_changed):
+        state = []
+        if self.bad:
+            state.append("FAILURE")
+        elif fail_env_changed and self.env_changed:
+            state.append("ENV CHANGED")
+        elif self.no_tests_run():
+            state.append("NO TESTS RAN")
+
+        if self.interrupted:
+            state.append("INTERRUPTED")
+        if self.worker_bug:
+            state.append("WORKER BUG")
+        if not state:
+            state.append("SUCCESS")
+
+        return ', '.join(state)
+
+    def get_exitcode(self, fail_env_changed, fail_rerun):
+        exitcode = 0
+        if self.bad:
+            exitcode = EXITCODE_BAD_TEST
+        elif self.interrupted:
+            exitcode = EXITCODE_INTERRUPTED
+        elif fail_env_changed and self.env_changed:
+            exitcode = EXITCODE_ENV_CHANGED
+        elif self.no_tests_run():
+            exitcode = EXITCODE_NO_TESTS_RAN
+        elif fail_rerun and self.rerun:
+            exitcode = EXITCODE_RERUN_FAIL
+        elif self.worker_bug:
+            exitcode = EXITCODE_BAD_TEST
+        return exitcode
+
+    def accumulate_result(self, result: TestResult, runtests: RunTests):
+        test_name = result.test_name
+        rerun = runtests.rerun
+        fail_env_changed = runtests.fail_env_changed
+
+        match result.state:
+            case State.PASSED:
+                self.good.append(test_name)
+            case State.ENV_CHANGED:
+                self.env_changed.append(test_name)
+                self.rerun_results.append(result)
+            case State.SKIPPED:
+                self.skipped.append(test_name)
+            case State.RESOURCE_DENIED:
+                self.resource_denied.append(test_name)
+            case State.INTERRUPTED:
+                self.interrupted = True
+            case State.DID_NOT_RUN:
+                self.run_no_tests.append(test_name)
+            case _:
+                if result.is_failed(fail_env_changed):
+                    self.bad.append(test_name)
+                    self.rerun_results.append(result)
+                else:
+                    raise ValueError(f"invalid test state: {result.state!r}")
+
+        if result.state == State.WORKER_BUG:
+            self.worker_bug = True
+
+        if result.has_meaningful_duration() and not rerun:
+            self.test_times.append((result.duration, test_name))
+        if result.stats is not None:
+            self.stats.accumulate(result.stats)
+        if rerun:
+            self.rerun.append(test_name)
+
+        xml_data = result.xml_data
+        if xml_data:
+            self.add_junit(xml_data)
+
+    def need_rerun(self):
+        return bool(self.rerun_results)
+
+    def prepare_rerun(self) -> tuple[TestTuple, FilterDict]:
+        tests: TestList = []
+        match_tests_dict = {}
+        for result in self.rerun_results:
+            tests.append(result.test_name)
+
+            match_tests = result.get_rerun_match_tests()
+            # ignore empty match list
+            if match_tests:
+                match_tests_dict[result.test_name] = match_tests
+
+        # Clear previously failed tests
+        self.rerun_bad.extend(self.bad)
+        self.bad.clear()
+        self.env_changed.clear()
+        self.rerun_results.clear()
+
+        return (tuple(tests), match_tests_dict)
+
+    def add_junit(self, xml_data: list[str]):
+        import xml.etree.ElementTree as ET
+        for e in xml_data:
+            try:
+                self.testsuite_xml.append(ET.fromstring(e))
+            except ET.ParseError:
+                print(xml_data, file=sys.__stderr__)
+                raise
+
+    def write_junit(self, filename: StrPath):
+        if not self.testsuite_xml:
+            # Don't create empty XML file
+            return
+
+        import xml.etree.ElementTree as ET
+        root = ET.Element("testsuites")
+
+        # Manually count the totals for the overall summary
+        totals = {'tests': 0, 'errors': 0, 'failures': 0}
+        for suite in self.testsuite_xml:
+            root.append(suite)
+            for k in totals:
+                try:
+                    totals[k] += int(suite.get(k, 0))
+                except ValueError:
+                    pass
+
+        for k, v in totals.items():
+            root.set(k, str(v))
+
+        with open(filename, 'wb') as f:
+            for s in ET.tostringlist(root):
+                f.write(s)
+
+    def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool):
+        if print_slowest:
+            self.test_times.sort(reverse=True)
+            print()
+            print("10 slowest tests:")
+            for test_time, test in self.test_times[:10]:
+                print("- %s: %s" % (test, format_duration(test_time)))
+
+        all_tests = []
+        omitted = set(tests) - self.get_executed()
+
+        # less important
+        all_tests.append((omitted, "test", "{} omitted:"))
+        if not quiet:
+            all_tests.append((self.skipped, "test", "{} skipped:"))
+            all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
+        all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
+
+        # more important
+        all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
+        all_tests.append((self.rerun, "re-run test", "{}:"))
+        all_tests.append((self.bad, "test", "{} failed:"))
+
+        for tests_list, count_text, title_format in all_tests:
+            if tests_list:
+                print()
+                count_text = count(len(tests_list), count_text)
+                print(title_format.format(count_text))
+                printlist(tests_list)
+
+        if self.good and not quiet:
+            print()
+            text = count(len(self.good), "test")
+            text = f"{text} OK."
+            if (self.is_all_good() and len(self.good) > 1):
+                text = f"All {text}"
+            print(text)
+
+        if self.interrupted:
+            print()
+            print("Test suite interrupted by signal SIGINT.")
+
+    def display_summary(self, first_runtests: RunTests, filtered: bool):
+        # Total tests
+        stats = self.stats
+        text = f'run={stats.tests_run:,}'
+        if filtered:
+            text = f"{text} (filtered)"
+        report = [text]
+        if stats.failures:
+            report.append(f'failures={stats.failures:,}')
+        if stats.skipped:
+            report.append(f'skipped={stats.skipped:,}')
+        print(f"Total tests: {' '.join(report)}")
+
+        # Total test files
+        all_tests = [self.good, self.bad, self.rerun,
+                     self.skipped,
+                     self.env_changed, self.run_no_tests]
+        run = sum(map(len, all_tests))
+        text = f'run={run}'
+        if not first_runtests.forever:
+            ntest = len(first_runtests.tests)
+            text = f"{text}/{ntest}"
+        if filtered:
+            text = f"{text} (filtered)"
+        report = [text]
+        for name, tests in (
+            ('failed', self.bad),
+            ('env_changed', self.env_changed),
+            ('skipped', self.skipped),
+            ('resource_denied', self.resource_denied),
+            ('rerun', self.rerun),
+            ('run_no_tests', self.run_no_tests),
+        ):
+            if tests:
+                report.append(f'{name}={len(tests)}')
+        print(f"Total test files: {' '.join(report)}")
diff --git a/Lib/test/libregrtest/run_workers.py b/Lib/test/libregrtest/run_workers.py
new file mode 100644 (file)
index 0000000..16f8331
--- /dev/null
@@ -0,0 +1,607 @@
+import contextlib
+import dataclasses
+import faulthandler
+import os.path
+import queue
+import signal
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import traceback
+from typing import Literal, TextIO
+
+from test import support
+from test.support import os_helper, MS_WINDOWS
+
+from .logger import Logger
+from .result import TestResult, State
+from .results import TestResults
+from .runtests import RunTests, JsonFile, JsonFileType
+from .single import PROGRESS_MIN_TIME
+from .utils import (
+    StrPath, TestName,
+    format_duration, print_warning, count, plural, get_signal_name)
+from .worker import create_worker_process, USE_PROCESS_GROUP
+
+if MS_WINDOWS:
+    import locale
+    import msvcrt
+
+
+
+# Display the running tests if nothing has happened in the last N seconds
+PROGRESS_UPDATE = 30.0   # seconds
+assert PROGRESS_UPDATE >= PROGRESS_MIN_TIME
+
+# Kill the main process after 5 minutes. It is supposed to write an update
+# every PROGRESS_UPDATE seconds. Tolerate 5 minutes for Python's slowest
+# buildbot workers.
+MAIN_PROCESS_TIMEOUT = 5 * 60.0
+assert MAIN_PROCESS_TIMEOUT >= PROGRESS_UPDATE
+
+# Time to wait until a worker completes: should be immediate
+WAIT_COMPLETED_TIMEOUT = 30.0   # seconds
+
+# Time to wait for a killed process (in seconds)
+WAIT_KILLED_TIMEOUT = 60.0
+
+
+# We do not use a generator so multiple threads can call next().
+class MultiprocessIterator:
+
+    """A thread-safe iterator over tests for multiprocess mode."""
+
+    def __init__(self, tests_iter):
+        self.lock = threading.Lock()
+        self.tests_iter = tests_iter
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        with self.lock:
+            if self.tests_iter is None:
+                raise StopIteration
+            return next(self.tests_iter)
+
+    def stop(self):
+        with self.lock:
+            self.tests_iter = None
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class MultiprocessResult:
+    result: TestResult
+    # bpo-45410: stderr is written into stdout to keep messages order
+    worker_stdout: str | None = None
+    err_msg: str | None = None
+
+
+ExcStr = str
+QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
+
+
+class ExitThread(Exception):
+    pass
+
+
+class WorkerError(Exception):
+    def __init__(self,
+                 test_name: TestName,
+                 err_msg: str | None,
+                 stdout: str | None,
+                 state: str):
+        result = TestResult(test_name, state=state)
+        self.mp_result = MultiprocessResult(result, stdout, err_msg)
+        super().__init__()
+
+
+class WorkerThread(threading.Thread):
+    def __init__(self, worker_id: int, runner: "RunWorkers") -> None:
+        super().__init__()
+        self.worker_id = worker_id
+        self.runtests = runner.runtests
+        self.pending = runner.pending
+        self.output = runner.output
+        self.timeout = runner.worker_timeout
+        self.log = runner.log
+        self.test_name: TestName | None = None
+        self.start_time: float | None = None
+        self._popen: subprocess.Popen[str] | None = None
+        self._killed = False
+        self._stopped = False
+
+    def __repr__(self) -> str:
+        info = [f'WorkerThread #{self.worker_id}']
+        if self.is_alive():
+            info.append("running")
+        else:
+            info.append('stopped')
+        test = self.test_name
+        if test:
+            info.append(f'test={test}')
+        popen = self._popen
+        if popen is not None:
+            dt = time.monotonic() - self.start_time
+            info.extend((f'pid={self._popen.pid}',
+                         f'time={format_duration(dt)}'))
+        return '<%s>' % ' '.join(info)
+
+    def _kill(self) -> None:
+        popen = self._popen
+        if popen is None:
+            return
+
+        if self._killed:
+            return
+        self._killed = True
+
+        if USE_PROCESS_GROUP:
+            what = f"{self} process group"
+        else:
+            what = f"{self} process"
+
+        print(f"Kill {what}", file=sys.stderr, flush=True)
+        try:
+            if USE_PROCESS_GROUP:
+                os.killpg(popen.pid, signal.SIGKILL)
+            else:
+                popen.kill()
+        except ProcessLookupError:
+            # popen.kill(): the process already completed and the WorkerThread
+            # read its exit status, but Popen.send_signal() read the returncode
+            # just before Popen.wait() set it.
+            pass
+        except OSError as exc:
+            print_warning(f"Failed to kill {what}: {exc!r}")
+
+    def stop(self) -> None:
+        # Method called from a different thread to stop this thread
+        self._stopped = True
+        self._kill()
+
+    def _run_process(self, runtests: RunTests, output_fd: int,
+                     tmp_dir: StrPath | None = None) -> int | None:
+        popen = create_worker_process(runtests, output_fd, tmp_dir)
+        self._popen = popen
+        self._killed = False
+
+        try:
+            if self._stopped:
+                # If kill() has been called before self._popen is set,
+                # self._popen is still running. Call kill() again
+                # to ensure that the process is killed.
+                self._kill()
+                raise ExitThread
+
+            try:
+                # gh-94026: stdout+stderr are written to tempfile
+                retcode = popen.wait(timeout=self.timeout)
+                assert retcode is not None
+                return retcode
+            except subprocess.TimeoutExpired:
+                if self._stopped:
+                    # kill() has been called: communicate() fails on reading
+                    # closed stdout
+                    raise ExitThread
+
+                # On timeout, kill the process
+                self._kill()
+
+                # None means TIMEOUT for the caller
+                retcode = None
+                # bpo-38207: Don't attempt to call communicate() again: it
+                # can hang until all child processes using the stdout
+                # pipes complete.
+            except OSError:
+                if self._stopped:
+                    # kill() has been called: communicate() fails
+                    # on reading closed stdout
+                    raise ExitThread
+                raise
+        except:
+            self._kill()
+            raise
+        finally:
+            self._wait_completed()
+            self._popen = None
+
+    def create_stdout(self, stack: contextlib.ExitStack) -> TextIO:
+        """Create stdout temporay file (file descriptor)."""
+
+        if MS_WINDOWS:
+            # gh-95027: When stdout is not a TTY, Python uses the ANSI code
+            # page for the sys.stdout encoding. If the main process runs in a
+            # terminal, sys.stdout uses WindowsConsoleIO with UTF-8 encoding.
+            encoding = locale.getencoding()
+        else:
+            encoding = sys.stdout.encoding
+
+        # gh-94026: Write stdout+stderr to a tempfile as workaround for
+        # non-blocking pipes on Emscripten with NodeJS.
+        # gh-109425: Use "backslashreplace" error handler: log corrupted
+        # stdout+stderr, instead of failing with a UnicodeDecodeError and not
+        # logging stdout+stderr at all.
+        stdout_file = tempfile.TemporaryFile('w+',
+                                             encoding=encoding,
+                                             errors='backslashreplace')
+        stack.enter_context(stdout_file)
+        return stdout_file
+
+    def create_json_file(self, stack: contextlib.ExitStack) -> tuple[JsonFile, TextIO | None]:
+        """Create JSON file."""
+
+        json_file_use_stdout = self.runtests.json_file_use_stdout()
+        if json_file_use_stdout:
+            json_file = JsonFile(None, JsonFileType.STDOUT)
+            json_tmpfile = None
+        else:
+            json_tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
+            stack.enter_context(json_tmpfile)
+
+            json_fd = json_tmpfile.fileno()
+            if MS_WINDOWS:
+                json_handle = msvcrt.get_osfhandle(json_fd)
+                json_file = JsonFile(json_handle,
+                                     JsonFileType.WINDOWS_HANDLE)
+            else:
+                json_file = JsonFile(json_fd, JsonFileType.UNIX_FD)
+        return (json_file, json_tmpfile)
+
+    def create_worker_runtests(self, test_name: TestName, json_file: JsonFile) -> RunTests:
+        """Create the worker RunTests."""
+
+        tests = (test_name,)
+        if self.runtests.rerun:
+            match_tests = self.runtests.get_match_tests(test_name)
+        else:
+            match_tests = None
+
+        kwargs = {}
+        if match_tests:
+            kwargs['match_tests'] = match_tests
+        if self.runtests.output_on_failure:
+            kwargs['verbose'] = True
+            kwargs['output_on_failure'] = False
+        return self.runtests.copy(
+            tests=tests,
+            json_file=json_file,
+            **kwargs)
+
+    def run_tmp_files(self, worker_runtests: RunTests,
+                      stdout_fd: int) -> tuple[int | None, list[StrPath]]:
+        # gh-93353: Check for leaked temporary files in the parent process,
+        # since the deletion of temporary files can happen late during
+        # Python finalization: too late for libregrtest.
+        if not support.is_wasi:
+            # Don't check for leaked temporary files and directories if Python is
+            # run on WASI. WASI doesn't pass environment variables like TMPDIR to
+            # worker processes.
+            tmp_dir = tempfile.mkdtemp(prefix="test_python_")
+            tmp_dir = os.path.abspath(tmp_dir)
+            try:
+                retcode = self._run_process(worker_runtests,
+                                            stdout_fd, tmp_dir)
+            finally:
+                tmp_files = os.listdir(tmp_dir)
+                os_helper.rmtree(tmp_dir)
+        else:
+            retcode = self._run_process(worker_runtests, stdout_fd)
+            tmp_files = []
+
+        return (retcode, tmp_files)
+
+    def read_stdout(self, stdout_file: TextIO) -> str:
+        stdout_file.seek(0)
+        try:
+            return stdout_file.read().strip()
+        except Exception as exc:
+            # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+            # decoded from encoding
+            raise WorkerError(self.test_name,
+                              f"Cannot read process stdout: {exc}",
+                              stdout=None,
+                              state=State.WORKER_BUG)
+
+    def read_json(self, json_file: JsonFile, json_tmpfile: TextIO | None,
+                  stdout: str) -> tuple[TestResult, str]:
+        try:
+            if json_tmpfile is not None:
+                json_tmpfile.seek(0)
+                worker_json = json_tmpfile.read()
+            elif json_file.file_type == JsonFileType.STDOUT:
+                stdout, _, worker_json = stdout.rpartition("\n")
+                stdout = stdout.rstrip()
+            else:
+                with json_file.open(encoding='utf8') as json_fp:
+                    worker_json = json_fp.read()
+        except Exception as exc:
+            # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+            # decoded from encoding
+            err_msg = f"Failed to read worker process JSON: {exc}"
+            raise WorkerError(self.test_name, err_msg, stdout,
+                              state=State.WORKER_BUG)
+
+        if not worker_json:
+            raise WorkerError(self.test_name, "empty JSON", stdout,
+                              state=State.WORKER_BUG)
+
+        try:
+            result = TestResult.from_json(worker_json)
+        except Exception as exc:
+            # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+            # decoded from encoding
+            err_msg = f"Failed to parse worker process JSON: {exc}"
+            raise WorkerError(self.test_name, err_msg, stdout,
+                              state=State.WORKER_BUG)
+
+        return (result, stdout)
+
+    def _runtest(self, test_name: TestName) -> MultiprocessResult:
+        with contextlib.ExitStack() as stack:
+            stdout_file = self.create_stdout(stack)
+            json_file, json_tmpfile = self.create_json_file(stack)
+            worker_runtests = self.create_worker_runtests(test_name, json_file)
+
+            retcode, tmp_files = self.run_tmp_files(worker_runtests,
+                                                    stdout_file.fileno())
+
+            stdout = self.read_stdout(stdout_file)
+
+            if retcode is None:
+                raise WorkerError(self.test_name, stdout=stdout,
+                                  err_msg=None,
+                                  state=State.TIMEOUT)
+            if retcode != 0:
+                name = get_signal_name(retcode)
+                if name:
+                    retcode = f"{retcode} ({name})"
+                raise WorkerError(self.test_name, f"Exit code {retcode}", stdout,
+                                  state=State.WORKER_FAILED)
+
+            result, stdout = self.read_json(json_file, json_tmpfile, stdout)
+
+        if tmp_files:
+            msg = (f'\n\n'
+                   f'Warning -- {test_name} leaked temporary files '
+                   f'({len(tmp_files)}): {", ".join(sorted(tmp_files))}')
+            stdout += msg
+            result.set_env_changed()
+
+        return MultiprocessResult(result, stdout)
+
+    def run(self) -> None:
+        fail_fast = self.runtests.fail_fast
+        fail_env_changed = self.runtests.fail_env_changed
+        while not self._stopped:
+            try:
+                try:
+                    test_name = next(self.pending)
+                except StopIteration:
+                    break
+
+                self.start_time = time.monotonic()
+                self.test_name = test_name
+                try:
+                    mp_result = self._runtest(test_name)
+                except WorkerError as exc:
+                    mp_result = exc.mp_result
+                finally:
+                    self.test_name = None
+                mp_result.result.duration = time.monotonic() - self.start_time
+                self.output.put((False, mp_result))
+
+                if mp_result.result.must_stop(fail_fast, fail_env_changed):
+                    break
+            except ExitThread:
+                break
+            except BaseException:
+                self.output.put((True, traceback.format_exc()))
+                break
+
+    def _wait_completed(self) -> None:
+        popen = self._popen
+
+        try:
+            popen.wait(WAIT_COMPLETED_TIMEOUT)
+        except (subprocess.TimeoutExpired, OSError) as exc:
+            print_warning(f"Failed to wait for {self} completion "
+                          f"(timeout={format_duration(WAIT_COMPLETED_TIMEOUT)}): "
+                          f"{exc!r}")
+
+    def wait_stopped(self, start_time: float) -> None:
+        # bpo-38207: RunWorkers.stop_workers() called self.stop()
+        # which killed the process. Sometimes, killing the process from the
+        # main thread does not interrupt popen.communicate() in
+        # WorkerThread thread. This loop with a timeout is a workaround
+        # for that.
+        #
+        # Moreover, if this method fails to join the thread, it is likely
+        # that Python will hang at exit while calling threading._shutdown()
+        # which tries again to join the blocked thread. Regrtest.main()
+        # uses EXIT_TIMEOUT to workaround this second bug.
+        while True:
+            # Write a message every second
+            self.join(1.0)
+            if not self.is_alive():
+                break
+            dt = time.monotonic() - start_time
+            self.log(f"Waiting for {self} thread for {format_duration(dt)}")
+            if dt > WAIT_KILLED_TIMEOUT:
+                print_warning(f"Failed to join {self} in {format_duration(dt)}")
+                break
+
+
+def get_running(workers: list[WorkerThread]) -> str | None:
+    running: list[str] = []
+    for worker in workers:
+        test_name = worker.test_name
+        if not test_name:
+            continue
+        dt = time.monotonic() - worker.start_time
+        if dt >= PROGRESS_MIN_TIME:
+            text = f'{test_name} ({format_duration(dt)})'
+            running.append(text)
+    if not running:
+        return None
+    return f"running ({len(running)}): {', '.join(running)}"
+
+
+class RunWorkers:
+    def __init__(self, num_workers: int, runtests: RunTests,
+                 logger: Logger, results: TestResults) -> None:
+        self.num_workers = num_workers
+        self.runtests = runtests
+        self.log = logger.log
+        self.display_progress = logger.display_progress
+        self.results: TestResults = results
+
+        self.output: queue.Queue[QueueOutput] = queue.Queue()
+        tests_iter = runtests.iter_tests()
+        self.pending = MultiprocessIterator(tests_iter)
+        self.timeout = runtests.timeout
+        if self.timeout is not None:
+            # Rely on faulthandler to kill a worker process. This timeout is
+            # used when faulthandler fails to kill a worker process: give
+            # faulthandler a maximum of 5 minutes to kill the worker.
+            self.worker_timeout: float | None = min(self.timeout * 1.5, self.timeout + 5 * 60)
+        else:
+            self.worker_timeout = None
+        self.workers: list[WorkerThread] | None = None
+
+        jobs = self.runtests.get_jobs()
+        if jobs is not None:
+            # Don't spawn more threads than the number of jobs:
+            # these worker threads would never get anything to do.
+            self.num_workers = min(self.num_workers, jobs)
+
+    def start_workers(self) -> None:
+        self.workers = [WorkerThread(index, self)
+                        for index in range(1, self.num_workers + 1)]
+        jobs = self.runtests.get_jobs()
+        if jobs is not None:
+            tests = count(jobs, 'test')
+        else:
+            tests = 'tests'
+        nworkers = len(self.workers)
+        processes = plural(nworkers, "process", "processes")
+        msg = (f"Run {tests} in parallel using "
+               f"{nworkers} worker {processes}")
+        if self.timeout:
+            msg += (" (timeout: %s, worker timeout: %s)"
+                    % (format_duration(self.timeout),
+                       format_duration(self.worker_timeout)))
+        self.log(msg)
+        for worker in self.workers:
+            worker.start()
+
+    def stop_workers(self) -> None:
+        start_time = time.monotonic()
+        for worker in self.workers:
+            worker.stop()
+        for worker in self.workers:
+            worker.wait_stopped(start_time)
+
+    def _get_result(self) -> QueueOutput | None:
+        pgo = self.runtests.pgo
+        use_faulthandler = (self.timeout is not None)
+
+        # bpo-46205: check the status of workers every iteration to avoid
+        # waiting forever on an empty queue.
+        while any(worker.is_alive() for worker in self.workers):
+            if use_faulthandler:
+                faulthandler.dump_traceback_later(MAIN_PROCESS_TIMEOUT,
+                                                  exit=True)
+
+            # wait for a thread
+            try:
+                return self.output.get(timeout=PROGRESS_UPDATE)
+            except queue.Empty:
+                pass
+
+            if not pgo:
+                # display progress
+                running = get_running(self.workers)
+                if running:
+                    self.log(running)
+
+        # all worker threads are done: consume pending results
+        try:
+            return self.output.get(timeout=0)
+        except queue.Empty:
+            return None
+
+    def display_result(self, mp_result: MultiprocessResult) -> None:
+        result = mp_result.result
+        pgo = self.runtests.pgo
+
+        text = str(result)
+        if mp_result.err_msg:
+            # WORKER_BUG
+            text += ' (%s)' % mp_result.err_msg
+        elif (result.duration >= PROGRESS_MIN_TIME and not pgo):
+            text += ' (%s)' % format_duration(result.duration)
+        if not pgo:
+            running = get_running(self.workers)
+            if running:
+                text += f' -- {running}'
+        self.display_progress(self.test_index, text)
+
+    def _process_result(self, item: QueueOutput) -> TestResult:
+        """Returns True if test runner must stop."""
+        if item[0]:
+            # Thread got an exception
+            format_exc = item[1]
+            print_warning(f"regrtest worker thread failed: {format_exc}")
+            result = TestResult("<regrtest worker>", state=State.WORKER_BUG)
+            self.results.accumulate_result(result, self.runtests)
+            return result
+
+        self.test_index += 1
+        mp_result = item[1]
+        result = mp_result.result
+        self.results.accumulate_result(result, self.runtests)
+        self.display_result(mp_result)
+
+        # Display worker stdout
+        if not self.runtests.output_on_failure:
+            show_stdout = True
+        else:
+            # --verbose3 ignores stdout on success
+            show_stdout = (result.state != State.PASSED)
+        if show_stdout:
+            stdout = mp_result.worker_stdout
+            if stdout:
+                print(stdout, flush=True)
+
+        return result
+
+    def run(self) -> None:
+        fail_fast = self.runtests.fail_fast
+        fail_env_changed = self.runtests.fail_env_changed
+
+        self.start_workers()
+
+        self.test_index = 0
+        try:
+            while True:
+                item = self._get_result()
+                if item is None:
+                    break
+
+                result = self._process_result(item)
+                if result.must_stop(fail_fast, fail_env_changed):
+                    break
+        except KeyboardInterrupt:
+            print()
+            self.results.interrupted = True
+        finally:
+            if self.timeout is not None:
+                faulthandler.cancel_dump_traceback_later()
+
+            # Always ensure that all worker processes are no longer
+            # running when we exit this function.
+            self.pending.stop()
+            self.stop_workers()
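
Several WorkerThread instances pull test names from one shared MultiprocessIterator, which is essentially an iterator guarded by a lock. A simplified stand-alone sketch of that pattern; LockedIterator is a stand-in, not the real class:

    import threading

    class LockedIterator:                      # simplified stand-in for MultiprocessIterator
        def __init__(self, it):
            self._lock = threading.Lock()
            self._it = iter(it)
        def __iter__(self):
            return self
        def __next__(self):
            with self._lock:
                return next(self._it)

    pending = LockedIterator(f"test_{i}" for i in range(6))
    claimed = []

    def worker():
        for name in pending:                   # each thread drains the shared iterator
            claimed.append(name)

    threads = [threading.Thread(target=worker) for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(sorted(claimed))                     # every test claimed exactly once
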
diff --git a/Lib/test/libregrtest/runtests.py b/Lib/test/libregrtest/runtests.py
new file mode 100644 (file)
index 0000000..4da312d
--- /dev/null
@@ -0,0 +1,162 @@
+import contextlib
+import dataclasses
+import json
+import os
+import subprocess
+from typing import Any
+
+from test import support
+
+from .utils import (
+    StrPath, StrJSON, TestTuple, FilterTuple, FilterDict)
+
+
+class JsonFileType:
+    UNIX_FD = "UNIX_FD"
+    WINDOWS_HANDLE = "WINDOWS_HANDLE"
+    STDOUT = "STDOUT"
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class JsonFile:
+    # The meaning of 'file' depends on file_type:
+    # - UNIX_FD: file descriptor (int)
+    # - WINDOWS_HANDLE: handle (int)
+    # - STDOUT: use process stdout (None)
+    file: int | None
+    file_type: str
+
+    def configure_subprocess(self, popen_kwargs: dict) -> None:
+        match self.file_type:
+            case JsonFileType.UNIX_FD:
+                # Unix file descriptor
+                popen_kwargs['pass_fds'] = [self.file]
+            case JsonFileType.WINDOWS_HANDLE:
+                # Windows handle
+                startupinfo = subprocess.STARTUPINFO()
+                startupinfo.lpAttributeList = {"handle_list": [self.file]}
+                popen_kwargs['startupinfo'] = startupinfo
+
+    @contextlib.contextmanager
+    def inherit_subprocess(self):
+        if self.file_type == JsonFileType.WINDOWS_HANDLE:
+            os.set_handle_inheritable(self.file, True)
+            try:
+                yield
+            finally:
+                os.set_handle_inheritable(self.file, False)
+        else:
+            yield
+
+    def open(self, mode='r', *, encoding):
+        if self.file_type == JsonFileType.STDOUT:
+            raise ValueError("for STDOUT file type, just use sys.stdout")
+
+        file = self.file
+        if self.file_type == JsonFileType.WINDOWS_HANDLE:
+            import msvcrt
+            # Create a file descriptor from the handle
+            file = msvcrt.open_osfhandle(file, os.O_WRONLY)
+        return open(file, mode, encoding=encoding)
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class HuntRefleak:
+    warmups: int
+    runs: int
+    filename: StrPath
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class RunTests:
+    tests: TestTuple
+    fail_fast: bool
+    fail_env_changed: bool
+    match_tests: FilterTuple | None
+    ignore_tests: FilterTuple | None
+    match_tests_dict: FilterDict | None
+    rerun: bool
+    forever: bool
+    pgo: bool
+    pgo_extended: bool
+    output_on_failure: bool
+    timeout: float | None
+    verbose: int
+    quiet: bool
+    hunt_refleak: HuntRefleak | None
+    test_dir: StrPath | None
+    use_junit: bool
+    memory_limit: str | None
+    gc_threshold: int | None
+    use_resources: tuple[str, ...]
+    python_cmd: tuple[str, ...] | None
+    randomize: bool
+    random_seed: int | None
+    json_file: JsonFile | None
+
+    def copy(self, **override):
+        state = dataclasses.asdict(self)
+        state.update(override)
+        return RunTests(**state)
+
+    def get_match_tests(self, test_name) -> FilterTuple | None:
+        if self.match_tests_dict is not None:
+            return self.match_tests_dict.get(test_name, None)
+        else:
+            return None
+
+    def get_jobs(self):
+        # Number of run_single_test() calls needed to run all tests.
+        # None means that there is no bound (--forever option).
+        if self.forever:
+            return None
+        return len(self.tests)
+
+    def iter_tests(self):
+        if self.forever:
+            while True:
+                yield from self.tests
+        else:
+            yield from self.tests
+
+    def as_json(self) -> StrJSON:
+        return json.dumps(self, cls=_EncodeRunTests)
+
+    @staticmethod
+    def from_json(worker_json: StrJSON) -> 'RunTests':
+        return json.loads(worker_json, object_hook=_decode_runtests)
+
+    def json_file_use_stdout(self) -> bool:
+        # Use STDOUT in two cases:
+        #
+        # - If --python command line option is used;
+        # - On Emscripten and WASI.
+        #
+        # On other platforms, UNIX_FD or WINDOWS_HANDLE can be used.
+        return (
+            bool(self.python_cmd)
+            or support.is_emscripten
+            or support.is_wasi
+        )
+
+
+class _EncodeRunTests(json.JSONEncoder):
+    def default(self, o: Any) -> dict[str, Any]:
+        if isinstance(o, RunTests):
+            result = dataclasses.asdict(o)
+            result["__runtests__"] = True
+            return result
+        else:
+            return super().default(o)
+
+
+def _decode_runtests(data: dict[str, Any]) -> RunTests | dict[str, Any]:
+    if "__runtests__" in data:
+        data.pop('__runtests__')
+        if data['hunt_refleak']:
+            data['hunt_refleak'] = HuntRefleak(**data['hunt_refleak'])
+        if data['json_file']:
+            data['json_file'] = JsonFile(**data['json_file'])
+        return RunTests(**data)
+    else:
+        return data
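
As a usage sketch (not part of this patch): the parent process serializes a RunTests instance with as_json() and the worker process rebuilds it with from_json(). All field values below are made up for illustration; note that tuple fields come back as lists after the JSON round trip.

    from test.libregrtest.runtests import RunTests

    runtests = RunTests(
        tests=('test_os',),
        fail_fast=False, fail_env_changed=False,
        match_tests=None, ignore_tests=None, match_tests_dict=None,
        rerun=False, forever=False, pgo=False, pgo_extended=False,
        output_on_failure=False, timeout=None, verbose=0, quiet=False,
        hunt_refleak=None, test_dir=None, use_junit=False,
        memory_limit=None, gc_threshold=None, use_resources=(),
        python_cmd=None, randomize=False, random_seed=None, json_file=None,
    )
    worker_json = runtests.as_json()          # JSON string tagged with "__runtests__"
    restored = RunTests.from_json(worker_json)
    assert restored.tests[0] == 'test_os'
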
index 164fe9806b5f0dc780e986ed6be7cb7f07c8cfbd..b2cc381344b2efe2120126e259197ff0f149045f 100644 (file)
@@ -3,9 +3,11 @@ import locale
 import os
 import sys
 import threading
+
 from test import support
 from test.support import os_helper
-from test.libregrtest.utils import print_warning
+
+from .utils import print_warning
 
 
 class SkipTestEnvironment(Exception):
@@ -34,7 +36,7 @@ class saved_test_environment:
     items is also printed.
     """
 
-    def __init__(self, test_name, verbose=0, quiet=False, *, pgo=False):
+    def __init__(self, test_name, verbose, quiet, *, pgo):
         self.test_name = test_name
         self.verbose = verbose
         self.quiet = quiet
index ec021668553fb457babc30f51d554a71555d6c7f..793347f60ad93c1bde8b199663a0d2fc5db7e741 100644 (file)
@@ -1,24 +1,32 @@
-import atexit
 import faulthandler
+import gc
 import os
+import random
 import signal
 import sys
 import unittest
 from test import support
 from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
-try:
-    import gc
-except ImportError:
-    gc = None
 
-from test.libregrtest.utils import (setup_unraisable_hook,
-                                    setup_threading_excepthook)
+from .runtests import RunTests
+from .utils import (
+    setup_unraisable_hook, setup_threading_excepthook, fix_umask,
+    adjust_rlimit_nofile)
 
 
 UNICODE_GUARD_ENV = "PYTHONREGRTEST_UNICODE_GUARD"
 
 
-def setup_tests(ns):
+def setup_test_dir(testdir: str | None) -> None:
+    if testdir:
+        # Prepend test directory to sys.path, so runtest() will be able
+        # to locate tests
+        sys.path.insert(0, os.path.abspath(testdir))
+
+
+def setup_process():
+    fix_umask()
+
     try:
         stderr_fd = sys.__stderr__.fileno()
     except (ValueError, AttributeError):
@@ -40,14 +48,9 @@ def setup_tests(ns):
         for signum in signals:
             faulthandler.register(signum, chain=True, file=stderr_fd)
 
-    _adjust_resource_limits()
-    replace_stdout()
-    support.record_original_stdout(sys.stdout)
+    adjust_rlimit_nofile()
 
-    if ns.testdir:
-        # Prepend test directory to sys.path, so runtest() will be able
-        # to locate tests
-        sys.path.insert(0, os.path.abspath(ns.testdir))
+    support.record_original_stdout(sys.stdout)
 
     # Some times __path__ and __file__ are not absolute (e.g. while running from
     # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
@@ -66,19 +69,6 @@ def setup_tests(ns):
         if getattr(module, '__file__', None):
             module.__file__ = os.path.abspath(module.__file__)
 
-    if ns.huntrleaks:
-        unittest.BaseTestSuite._cleanup = False
-
-    if ns.memlimit is not None:
-        support.set_memlimit(ns.memlimit)
-
-    if ns.threshold is not None:
-        gc.set_threshold(ns.threshold)
-
-    support.suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)
-
-    support.use_resources = ns.use_resources
-
     if hasattr(sys, 'addaudithook'):
         # Add an auditing hook for all tests to ensure PySys_Audit is tested
         def _test_audit_hook(name, args):
@@ -88,7 +78,37 @@ def setup_tests(ns):
     setup_unraisable_hook()
     setup_threading_excepthook()
 
-    timeout = ns.timeout
+    # Ensure there's a non-ASCII character in env vars at all times to force
+    # tests to consider this case. See BPO-44647 for details.
+    if TESTFN_UNDECODABLE and os.supports_bytes_environ:
+        os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE)
+    elif FS_NONASCII:
+        os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
+
+
+def setup_tests(runtests: RunTests):
+    support.verbose = runtests.verbose
+    support.failfast = runtests.fail_fast
+    support.PGO = runtests.pgo
+    support.PGO_EXTENDED = runtests.pgo_extended
+
+    support.set_match_tests(runtests.match_tests, runtests.ignore_tests)
+
+    if runtests.use_junit:
+        support.junit_xml_list = []
+        from test.support.testresult import RegressionTestResult
+        RegressionTestResult.USE_XML = True
+    else:
+        support.junit_xml_list = None
+
+    if runtests.memory_limit is not None:
+        support.set_memlimit(runtests.memory_limit)
+
+    support.suppress_msvcrt_asserts(runtests.verbose >= 2)
+
+    support.use_resources = runtests.use_resources
+
+    timeout = runtests.timeout
     if timeout is not None:
         # For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
         support.LOOPBACK_TIMEOUT = max(support.LOOPBACK_TIMEOUT, timeout / 120)
@@ -102,61 +122,10 @@ def setup_tests(ns):
         support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, timeout)
         support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, timeout)
 
-    if ns.xmlpath:
-        from test.support.testresult import RegressionTestResult
-        RegressionTestResult.USE_XML = True
-
-    # Ensure there's a non-ASCII character in env vars at all times to force
-    # tests consider this case. See BPO-44647 for details.
-    if TESTFN_UNDECODABLE and os.supports_bytes_environ:
-        os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE)
-    elif FS_NONASCII:
-        os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
-
-
-def replace_stdout():
-    """Set stdout encoder error handler to backslashreplace (as stderr error
-    handler) to avoid UnicodeEncodeError when printing a traceback"""
-    stdout = sys.stdout
-    try:
-        fd = stdout.fileno()
-    except ValueError:
-        # On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper
-        # object. Leaving sys.stdout unchanged.
-        #
-        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
-        # and ValueError on a closed stream.
-        return
-
-    sys.stdout = open(fd, 'w',
-        encoding=stdout.encoding,
-        errors="backslashreplace",
-        closefd=False,
-        newline='\n')
-
-    def restore_stdout():
-        sys.stdout.close()
-        sys.stdout = stdout
-    atexit.register(restore_stdout)
+    if runtests.hunt_refleak:
+        unittest.BaseTestSuite._cleanup = False
 
+    if runtests.gc_threshold is not None:
+        gc.set_threshold(runtests.gc_threshold)
 
-def _adjust_resource_limits():
-    """Adjust the system resource limits (ulimit) if needed."""
-    try:
-        import resource
-        from resource import RLIMIT_NOFILE
-    except ImportError:
-        return
-    fd_limit, max_fds = resource.getrlimit(RLIMIT_NOFILE)
-    # On macOS the default fd limit is sometimes too low (256) for our
-    # test suite to succeed.  Raise it to something more reasonable.
-    # 1024 is a common Linux default.
-    desired_fds = 1024
-    if fd_limit < desired_fds and fd_limit < max_fds:
-        new_fd_limit = min(desired_fds, max_fds)
-        try:
-            resource.setrlimit(RLIMIT_NOFILE, (new_fd_limit, max_fds))
-            print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
-        except (ValueError, OSError) as err:
-            print(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
-                  f"{new_fd_limit}: {err}.")
+    random.seed(runtests.random_seed)
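
A tiny illustration (not part of this patch) of why setup_tests() now ends with random.seed(runtests.random_seed): with a fixed --randseed value, any random choices made while tests run are reproducible from one run to the next.

    import random

    random.seed(12345)                 # e.g. --randseed=12345
    first = [random.randint(0, 100) for _ in range(3)]

    random.seed(12345)
    second = [random.randint(0, 100) for _ in range(3)]

    assert first == second             # same seed, same sequence
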
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
new file mode 100644 (file)
index 0000000..0304f85
--- /dev/null
@@ -0,0 +1,278 @@
+import doctest
+import faulthandler
+import gc
+import importlib
+import io
+import sys
+import time
+import traceback
+import unittest
+
+from test import support
+from test.support import TestStats
+from test.support import threading_helper
+
+from .result import State, TestResult
+from .runtests import RunTests
+from .save_env import saved_test_environment
+from .setup import setup_tests
+from .utils import (
+    TestName,
+    clear_caches, remove_testfn, abs_module_name, print_warning)
+
+
+# Minimum duration of a test to display its duration or to mention that
+# the test is running in the background
+PROGRESS_MIN_TIME = 30.0   # seconds
+
+
+def run_unittest(test_mod):
+    loader = unittest.TestLoader()
+    tests = loader.loadTestsFromModule(test_mod)
+    for error in loader.errors:
+        print(error, file=sys.stderr)
+    if loader.errors:
+        raise Exception("errors while loading tests")
+    return support.run_unittest(tests)
+
+
+def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None:
+    # Run test_func(), collect statistics, and detect reference and memory
+    # leaks.
+    if runtests.hunt_refleak:
+        from .refleak import runtest_refleak
+        refleak, test_result = runtest_refleak(result.test_name, test_func,
+                                               runtests.hunt_refleak,
+                                               runtests.quiet)
+    else:
+        test_result = test_func()
+        refleak = False
+
+    if refleak:
+        result.state = State.REFLEAK
+
+    stats: TestStats | None
+
+    match test_result:
+        case TestStats():
+            stats = test_result
+        case unittest.TestResult():
+            stats = TestStats.from_unittest(test_result)
+        case doctest.TestResults():
+            stats = TestStats.from_doctest(test_result)
+        case None:
+            print_warning(f"{result.test_name} test runner returned None: {test_func}")
+            stats = None
+        case _:
+            print_warning(f"Unknown test result type: {type(test_result)}")
+            stats = None
+
+    result.stats = stats
+
+
+# Storage of uncollectable GC objects (gc.garbage)
+GC_GARBAGE = []
+
+
+def _load_run_test(result: TestResult, runtests: RunTests) -> None:
+    # Load the test module and run the tests.
+    test_name = result.test_name
+    module_name = abs_module_name(test_name, runtests.test_dir)
+
+    # Remove the module from sys.modules to reload it if it was already imported
+    sys.modules.pop(module_name, None)
+
+    test_mod = importlib.import_module(module_name)
+
+    if hasattr(test_mod, "test_main"):
+        # https://github.com/python/cpython/issues/89392
+        raise Exception(f"Module {test_name} defines test_main() which "
+                        f"is no longer supported by regrtest")
+    def test_func():
+        return run_unittest(test_mod)
+
+    try:
+        regrtest_runner(result, test_func, runtests)
+    finally:
+        # First kill any dangling references to open files etc.
+        # This can also issue some ResourceWarnings which would otherwise get
+        # triggered during the following test run, and possibly produce
+        # failures.
+        support.gc_collect()
+
+        remove_testfn(test_name, runtests.verbose)
+
+    if gc.garbage:
+        support.environment_altered = True
+        print_warning(f"{test_name} created {len(gc.garbage)} "
+                      f"uncollectable object(s)")
+
+        # move the uncollectable objects somewhere,
+        # so we don't see them again
+        GC_GARBAGE.extend(gc.garbage)
+        gc.garbage.clear()
+
+    support.reap_children()
+
+
+def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
+                             display_failure: bool = True) -> None:
+    # Handle exceptions, detect environment changes.
+
+    # Reset the environment_altered flag to detect if a test altered
+    # the environment
+    support.environment_altered = False
+
+    pgo = runtests.pgo
+    if pgo:
+        display_failure = False
+    quiet = runtests.quiet
+
+    test_name = result.test_name
+    try:
+        clear_caches()
+        support.gc_collect()
+
+        with saved_test_environment(test_name,
+                                    runtests.verbose, quiet, pgo=pgo):
+            _load_run_test(result, runtests)
+    except support.ResourceDenied as exc:
+        if not quiet and not pgo:
+            print(f"{test_name} skipped -- {exc}", flush=True)
+        result.state = State.RESOURCE_DENIED
+        return
+    except unittest.SkipTest as exc:
+        if not quiet and not pgo:
+            print(f"{test_name} skipped -- {exc}", flush=True)
+        result.state = State.SKIPPED
+        return
+    except support.TestFailedWithDetails as exc:
+        msg = f"test {test_name} failed"
+        if display_failure:
+            msg = f"{msg} -- {exc}"
+        print(msg, file=sys.stderr, flush=True)
+        result.state = State.FAILED
+        result.errors = exc.errors
+        result.failures = exc.failures
+        result.stats = exc.stats
+        return
+    except support.TestFailed as exc:
+        msg = f"test {test_name} failed"
+        if display_failure:
+            msg = f"{msg} -- {exc}"
+        print(msg, file=sys.stderr, flush=True)
+        result.state = State.FAILED
+        result.stats = exc.stats
+        return
+    except support.TestDidNotRun:
+        result.state = State.DID_NOT_RUN
+        return
+    except KeyboardInterrupt:
+        print()
+        result.state = State.INTERRUPTED
+        return
+    except:
+        if not pgo:
+            msg = traceback.format_exc()
+            print(f"test {test_name} crashed -- {msg}",
+                  file=sys.stderr, flush=True)
+        result.state = State.UNCAUGHT_EXC
+        return
+
+    if support.environment_altered:
+        result.set_env_changed()
+    # Don't override the state if it was already set (REFLEAK or ENV_CHANGED)
+    if result.state is None:
+        result.state = State.PASSED
+
+
+def _runtest(result: TestResult, runtests: RunTests) -> None:
+    # Capture stdout and stderr, set faulthandler timeout,
+    # and create JUnit XML report.
+    verbose = runtests.verbose
+    output_on_failure = runtests.output_on_failure
+    timeout = runtests.timeout
+
+    use_timeout = (
+        timeout is not None and threading_helper.can_start_thread
+    )
+    if use_timeout:
+        faulthandler.dump_traceback_later(timeout, exit=True)
+
+    try:
+        setup_tests(runtests)
+
+        if output_on_failure:
+            support.verbose = True
+
+            stream = io.StringIO()
+            orig_stdout = sys.stdout
+            orig_stderr = sys.stderr
+            print_warning = support.print_warning
+            orig_print_warnings_stderr = print_warning.orig_stderr
+
+            output = None
+            try:
+                sys.stdout = stream
+                sys.stderr = stream
+                # print_warning() writes into the temporary stream to preserve
+                # the message order. If support.environment_altered becomes true,
+                # warnings will be written to sys.stderr below.
+                print_warning.orig_stderr = stream
+
+                _runtest_env_changed_exc(result, runtests, display_failure=False)
+                # Ignore output if the test passed successfully
+                if result.state != State.PASSED:
+                    output = stream.getvalue()
+            finally:
+                sys.stdout = orig_stdout
+                sys.stderr = orig_stderr
+                print_warning.orig_stderr = orig_print_warnings_stderr
+
+            if output is not None:
+                sys.stderr.write(output)
+                sys.stderr.flush()
+        else:
+            # Tell tests to be moderately quiet
+            support.verbose = verbose
+            _runtest_env_changed_exc(result, runtests,
+                                     display_failure=not verbose)
+
+        xml_list = support.junit_xml_list
+        if xml_list:
+            import xml.etree.ElementTree as ET
+            result.xml_data = [ET.tostring(x).decode('us-ascii')
+                               for x in xml_list]
+    finally:
+        if use_timeout:
+            faulthandler.cancel_dump_traceback_later()
+        support.junit_xml_list = None
+
+
+def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
+    """Run a single test.
+
+    test_name -- the name of the test
+
+    Returns a TestResult.
+
+    If runtests.use_junit, xml_data is a list containing each generated
+    testsuite element.
+    """
+    start_time = time.perf_counter()
+    result = TestResult(test_name)
+    pgo = runtests.pgo
+    try:
+        _runtest(result, runtests)
+    except:
+        if not pgo:
+            msg = traceback.format_exc()
+            print(f"test {test_name} crashed -- {msg}",
+                  file=sys.stderr, flush=True)
+        result.state = State.UNCAUGHT_EXC
+
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+    result.duration = time.perf_counter() - start_time
+    return result
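
A simplified, hypothetical analogue (not part of this patch) of the capture logic in _runtest() when output_on_failure (--verbose3) is set: stdout and stderr are redirected into a StringIO and replayed only if the test did not pass. run_quietly() below is illustrative and keys on an exception rather than on result.state.

    import io
    import sys

    def run_quietly(func) -> None:
        """Run func(); replay its captured output only if it raises."""
        stream = io.StringIO()
        orig_stdout, orig_stderr = sys.stdout, sys.stderr
        output = None
        try:
            sys.stdout = sys.stderr = stream
            try:
                func()
            except Exception:
                output = stream.getvalue()   # keep output only on failure
                raise
        finally:
            sys.stdout, sys.stderr = orig_stdout, orig_stderr
            if output is not None:
                sys.stderr.write(output)
                sys.stderr.flush()
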
index 28acb531f4d93e8933af37124d1d7b1e121a5a2a..86192ed9084f4027261b51892cd84ba55876e110 100644 (file)
@@ -1,9 +1,59 @@
+import contextlib
+import faulthandler
+import locale
 import math
 import os.path
+import platform
+import random
+import shlex
+import signal
+import subprocess
 import sys
 import sysconfig
+import tempfile
 import textwrap
+from collections.abc import Callable
+
 from test import support
+from test.support import os_helper
+from test.support import threading_helper
+
+
+# All temporary files and temporary directories created by libregrtest should
+# use TMP_PREFIX so cleanup_temp_dir() can remove them all.
+TMP_PREFIX = 'test_python_'
+WORK_DIR_PREFIX = TMP_PREFIX
+WORKER_WORK_DIR_PREFIX = WORK_DIR_PREFIX + 'worker_'
+
+# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
+# Used to protect against threading._shutdown() hang.
+# Must be smaller than buildbot "1200 seconds without output" limit.
+EXIT_TIMEOUT = 120.0
+
+
+ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
+                 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime')
+
+# Other resources excluded from --use=all:
+#
+# - extralargefile (ex: test_zipfile64): really too slow to be enabled
+#   "by default"
+# - tzdata: while needed to fully validate test_datetime, it makes
+#   test_datetime too slow (15-20 min on some buildbots) and so is disabled by
+#   default (see bpo-30822).
+RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
+
+
+# Types for types hints
+StrPath = str
+TestName = str
+StrJSON = str
+TestTuple = tuple[TestName, ...]
+TestList = list[TestName]
+# --match and --ignore options: list of patterns
+# ('*' joker character can be used)
+FilterTuple = tuple[TestName, ...]
+FilterDict = dict[TestName, FilterTuple]
 
 
 def format_duration(seconds):
@@ -31,7 +81,7 @@ def format_duration(seconds):
     return ' '.join(parts)
 
 
-def strip_py_suffix(names: list[str]):
+def strip_py_suffix(names: list[str] | None) -> None:
     if not names:
         return
     for idx, name in enumerate(names):
@@ -40,11 +90,20 @@ def strip_py_suffix(names: list[str]):
             names[idx] = basename
 
 
+def plural(n, singular, plural=None):
+    if n == 1:
+        return singular
+    elif plural is not None:
+        return plural
+    else:
+        return singular + 's'
+
+
 def count(n, word):
     if n == 1:
-        return "%d %s" % (n, word)
+        return f"{n} {word}"
     else:
-        return "%d %ss" % (n, word)
+        return f"{n} {word}s"
 
 
 def printlist(x, width=70, indent=4, file=None):
@@ -228,6 +287,11 @@ def get_build_info():
     ldflags_nodist = sysconfig.get_config_var('PY_LDFLAGS_NODIST') or ''
 
     build = []
+
+    # --disable-gil
+    if sysconfig.get_config_var('Py_NOGIL'):
+        build.append("nogil")
+
     if hasattr(sys, 'gettotalrefcount'):
         # --with-pydebug
         build.append('debug')
@@ -292,3 +356,331 @@ def get_build_info():
         build.append("dtrace")
 
     return build
+
+
+def get_temp_dir(tmp_dir: StrPath | None = None) -> StrPath:
+    if tmp_dir:
+        tmp_dir = os.path.expanduser(tmp_dir)
+    else:
+        # When tests are run from the Python build directory, it is best practice
+        # to keep the test files in a subfolder.  This eases the cleanup of leftover
+        # files using the "make distclean" command.
+        if sysconfig.is_python_build():
+            if not support.is_wasi:
+                tmp_dir = sysconfig.get_config_var('abs_builddir')
+                if tmp_dir is None:
+                    tmp_dir = sysconfig.get_config_var('abs_srcdir')
+                    if not tmp_dir:
+                        # gh-74470: On Windows, only srcdir is available. Using
+                        # abs_builddir mostly matters on UNIX when building
+                        # Python out of the source tree, especially when the
+                        # source tree is read only.
+                        tmp_dir = sysconfig.get_config_var('srcdir')
+                tmp_dir = os.path.join(tmp_dir, 'build')
+            else:
+                # WASI platform
+                tmp_dir = sysconfig.get_config_var('projectbase')
+                tmp_dir = os.path.join(tmp_dir, 'build')
+
+                # When get_temp_dir() is called in a worker process, the path
+                # it returns differs from the one in the parent process, which
+                # is not a WASI process. So the parent does not create the
+                # same "tmp_dir" as the test worker process.
+                os.makedirs(tmp_dir, exist_ok=True)
+        else:
+            tmp_dir = tempfile.gettempdir()
+
+    return os.path.abspath(tmp_dir)
+
+
+def fix_umask():
+    if support.is_emscripten:
+        # Emscripten has default umask 0o777, which breaks some tests.
+        # see https://github.com/emscripten-core/emscripten/issues/17269
+        old_mask = os.umask(0)
+        if old_mask == 0o777:
+            os.umask(0o027)
+        else:
+            os.umask(old_mask)
+
+
+def get_work_dir(parent_dir: StrPath, worker: bool = False) -> StrPath:
+    # Define a writable temp dir that will be used as cwd while running
+    # the tests. The name of the dir includes the pid to allow parallel
+    # testing (see the -j option).
+    # Emscripten and WASI have stubbed getpid(), and Emscripten has only
+    # millisecond clock resolution. Use randint() instead.
+    if support.is_emscripten or support.is_wasi:
+        nonce = random.randint(0, 1_000_000)
+    else:
+        nonce = os.getpid()
+
+    if worker:
+        work_dir = WORKER_WORK_DIR_PREFIX + str(nonce)
+    else:
+        work_dir = WORK_DIR_PREFIX + str(nonce)
+    work_dir += os_helper.FS_NONASCII
+    work_dir = os.path.join(parent_dir, work_dir)
+    return work_dir
+
+
+@contextlib.contextmanager
+def exit_timeout():
+    try:
+        yield
+    except SystemExit as exc:
+        # bpo-38203: Python can hang at exit in Py_Finalize(), especially
+        # on threading._shutdown() call: put a timeout
+        if threading_helper.can_start_thread:
+            faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)
+        sys.exit(exc.code)
+
+
+def remove_testfn(test_name: TestName, verbose: int) -> None:
+    # Try to clean up os_helper.TESTFN if left behind.
+    #
+    # While tests shouldn't leave any files or directories behind, when a test
+    # fails it can be tedious for it to arrange that.  The consequences can be
+    # especially nasty on Windows, since if a test leaves a file open, it
+    # cannot be deleted by name (while there's nothing we can do about that
+    # here either, we can display the name of the offending test, which is a
+    # real help).
+    name = os_helper.TESTFN
+    if not os.path.exists(name):
+        return
+
+    nuker: Callable[[str], None]
+    if os.path.isdir(name):
+        import shutil
+        kind, nuker = "directory", shutil.rmtree
+    elif os.path.isfile(name):
+        kind, nuker = "file", os.unlink
+    else:
+        raise RuntimeError(f"os.path says {name!r} exists but is neither "
+                           f"directory nor file")
+
+    if verbose:
+        print_warning(f"{test_name} left behind {kind} {name!r}")
+        support.environment_altered = True
+
+    try:
+        import stat
+        # fix possible permissions problems that might prevent cleanup
+        os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+        nuker(name)
+    except Exception as exc:
+        print_warning(f"{test_name} left behind {kind} {name!r} "
+                      f"and it couldn't be removed: {exc}")
+
+
+def abs_module_name(test_name: TestName, test_dir: StrPath | None) -> TestName:
+    if test_name.startswith('test.') or test_dir:
+        return test_name
+    else:
+        # Import it from the test package
+        return 'test.' + test_name
+
+
+# gh-90681: When rerunning tests, we might need to rerun the whole
+# class or module suite if some of its life-cycle hooks fail.
+# Test-level hooks are not affected.
+_TEST_LIFECYCLE_HOOKS = frozenset((
+    'setUpClass', 'tearDownClass',
+    'setUpModule', 'tearDownModule',
+))
+
+def normalize_test_name(test_full_name, *, is_error=False):
+    short_name = test_full_name.split(" ")[0]
+    if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
+        if test_full_name.startswith(('setUpModule (', 'tearDownModule (')):
+            # If setUpModule() or tearDownModule() failed, don't filter
+            # tests by the test file name: don't use filters at all.
+            return None
+
+        # This means that we have a failure in a life-cycle hook,
+        # we need to rerun the whole module or class suite.
+        # Basically the error looks like this:
+        #    ERROR: setUpClass (test.test_reg_ex.RegTest)
+        # or
+        #    ERROR: setUpModule (test.test_reg_ex)
+        # So, we need to parse the class / module name.
+        lpar = test_full_name.index('(')
+        rpar = test_full_name.index(')')
+        return test_full_name[lpar + 1: rpar].split('.')[-1]
+    return short_name
+
+
+def adjust_rlimit_nofile():
+    """
+    On macOS the default fd limit (RLIMIT_NOFILE) is sometimes too low (256)
+    for our test suite to succeed. Raise it to something more reasonable. 1024
+    is a common Linux default.
+    """
+    try:
+        import resource
+    except ImportError:
+        return
+
+    fd_limit, max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+    desired_fds = 1024
+
+    if fd_limit < desired_fds and fd_limit < max_fds:
+        new_fd_limit = min(desired_fds, max_fds)
+        try:
+            resource.setrlimit(resource.RLIMIT_NOFILE,
+                               (new_fd_limit, max_fds))
+            print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
+        except (ValueError, OSError) as err:
+            print_warning(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
+                          f"{new_fd_limit}: {err}.")
+
+
+def get_host_runner():
+    if (hostrunner := os.environ.get("_PYTHON_HOSTRUNNER")) is None:
+        hostrunner = sysconfig.get_config_var("HOSTRUNNER")
+    return hostrunner
+
+
+def is_cross_compiled():
+    return ('_PYTHON_HOST_PLATFORM' in os.environ)
+
+
+def format_resources(use_resources: tuple[str, ...]):
+    use_resources = set(use_resources)
+    all_resources = set(ALL_RESOURCES)
+
+    # Express resources relative to "all"
+    relative_all = ['all']
+    for name in sorted(all_resources - use_resources):
+        relative_all.append(f'-{name}')
+    for name in sorted(use_resources - all_resources):
+        relative_all.append(f'{name}')
+    all_text = ','.join(relative_all)
+    all_text = f"resources: {all_text}"
+
+    # List of enabled resources
+    text = ','.join(sorted(use_resources))
+    text = f"resources ({len(use_resources)}): {text}"
+
+    # Pick the shortest string (prefer relative to all if lengths are equal)
+    if len(all_text) <= len(text):
+        return all_text
+    else:
+        return text
+
+
+def process_cpu_count():
+    if hasattr(os, 'sched_getaffinity'):
+        return len(os.sched_getaffinity(0))
+    else:
+        return os.cpu_count()
+
+
+def display_header(use_resources: tuple[str, ...],
+                   python_cmd: tuple[str, ...] | None):
+    # Print basic platform information
+    print("==", platform.python_implementation(), *sys.version.split())
+    print("==", platform.platform(aliased=True),
+                  "%s-endian" % sys.byteorder)
+    print("== Python build:", ' '.join(get_build_info()))
+    print("== cwd:", os.getcwd())
+
+    cpu_count = os.cpu_count()
+    if cpu_count:
+        affinity = process_cpu_count()
+        if affinity and affinity != cpu_count:
+            cpu_count = f"{affinity} (process) / {cpu_count} (system)"
+        print("== CPU count:", cpu_count)
+    print("== encodings: locale=%s FS=%s"
+          % (locale.getencoding(), sys.getfilesystemencoding()))
+
+    if use_resources:
+        text = format_resources(use_resources)
+        print(f"== {text}")
+    else:
+        print("== resources: all test resources are disabled, "
+              "use -u option to unskip tests")
+
+    cross_compile = is_cross_compiled()
+    if cross_compile:
+        print("== cross compiled: Yes")
+    if python_cmd:
+        cmd = shlex.join(python_cmd)
+        print(f"== host python: {cmd}")
+
+        get_cmd = [*python_cmd, '-m', 'platform']
+        proc = subprocess.run(
+            get_cmd,
+            stdout=subprocess.PIPE,
+            text=True,
+            cwd=os_helper.SAVEDCWD)
+        stdout = proc.stdout.replace('\n', ' ').strip()
+        if stdout:
+            print(f"== host platform: {stdout}")
+        elif proc.returncode:
+            print(f"== host platform: <command failed with exit code {proc.returncode}>")
+    else:
+        hostrunner = get_host_runner()
+        if hostrunner:
+            print(f"== host runner: {hostrunner}")
+
+    # This makes it easier to remember what to set in your local
+    # environment when trying to reproduce a sanitizer failure.
+    asan = support.check_sanitizer(address=True)
+    msan = support.check_sanitizer(memory=True)
+    ubsan = support.check_sanitizer(ub=True)
+    sanitizers = []
+    if asan:
+        sanitizers.append("address")
+    if msan:
+        sanitizers.append("memory")
+    if ubsan:
+        sanitizers.append("undefined behavior")
+    if sanitizers:
+        print(f"== sanitizers: {', '.join(sanitizers)}")
+        for sanitizer, env_var in (
+            (asan, "ASAN_OPTIONS"),
+            (msan, "MSAN_OPTIONS"),
+            (ubsan, "UBSAN_OPTIONS"),
+        ):
+            options = os.environ.get(env_var)
+            if sanitizer and options is not None:
+                print(f"== {env_var}={options!r}")
+
+    print(flush=True)
+
+
+def cleanup_temp_dir(tmp_dir: StrPath):
+    import glob
+
+    path = os.path.join(glob.escape(tmp_dir), TMP_PREFIX + '*')
+    print("Cleanup %s directory" % tmp_dir)
+    for name in glob.glob(path):
+        if os.path.isdir(name):
+            print("Remove directory: %s" % name)
+            os_helper.rmtree(name)
+        else:
+            print("Remove file: %s" % name)
+            os_helper.unlink(name)
+
+WINDOWS_STATUS = {
+    0xC0000005: "STATUS_ACCESS_VIOLATION",
+    0xC00000FD: "STATUS_STACK_OVERFLOW",
+    0xC000013A: "STATUS_CONTROL_C_EXIT",
+}
+
+def get_signal_name(exitcode):
+    if exitcode < 0:
+        signum = -exitcode
+        try:
+            return signal.Signals(signum).name
+        except ValueError:
+            pass
+
+    try:
+        return WINDOWS_STATUS[exitcode]
+    except KeyError:
+        pass
+
+    return None
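
Usage sketch (not part of this patch) for get_signal_name(): negative exit codes map to POSIX signal names, a few well-known Windows NTSTATUS values are recognized, and anything else yields None.

    from test.libregrtest.utils import get_signal_name

    print(get_signal_name(-11))          # 'SIGSEGV' on POSIX platforms
    print(get_signal_name(0xC0000005))   # 'STATUS_ACCESS_VIOLATION'
    print(get_signal_name(1))            # None: a plain non-zero exit code
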
diff --git a/Lib/test/libregrtest/worker.py b/Lib/test/libregrtest/worker.py
new file mode 100644 (file)
index 0000000..a9c8be0
--- /dev/null
@@ -0,0 +1,116 @@
+import subprocess
+import sys
+import os
+from typing import Any, NoReturn
+
+from test import support
+from test.support import os_helper
+
+from .setup import setup_process, setup_test_dir
+from .runtests import RunTests, JsonFile, JsonFileType
+from .single import run_single_test
+from .utils import (
+    StrPath, StrJSON, FilterTuple,
+    get_temp_dir, get_work_dir, exit_timeout)
+
+
+USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
+
+
+def create_worker_process(runtests: RunTests, output_fd: int,
+                          tmp_dir: StrPath | None = None) -> subprocess.Popen:
+    python_cmd = runtests.python_cmd
+    worker_json = runtests.as_json()
+
+    python_opts = support.args_from_interpreter_flags()
+    if python_cmd is not None:
+        executable = python_cmd
+        # Remove -E option, since --python=COMMAND can set PYTHON environment
+        # variables, such as PYTHONPATH, in the worker process.
+        python_opts = [opt for opt in python_opts if opt != "-E"]
+    else:
+        executable = (sys.executable,)
+    cmd = [*executable, *python_opts,
+           '-u',    # Unbuffered stdout and stderr
+           '-m', 'test.libregrtest.worker',
+           worker_json]
+
+    env = dict(os.environ)
+    if tmp_dir is not None:
+        env['TMPDIR'] = tmp_dir
+        env['TEMP'] = tmp_dir
+        env['TMP'] = tmp_dir
+
+    # Running the child from the same working directory as regrtest's original
+    # invocation ensures that TEMPDIR for the child is the same when
+    # sysconfig.is_python_build() is true. See issue 15300.
+    #
+    # Emscripten and WASI Python must start in the Python source code directory
+    # to get 'python.js' or 'python.wasm' file. Then worker_process() changes
+    # to a temporary directory created to run tests.
+    work_dir = os_helper.SAVEDCWD
+
+    kwargs: dict[str, Any] = dict(
+        env=env,
+        stdout=output_fd,
+        # bpo-45410: Write stderr into stdout to keep messages order
+        stderr=output_fd,
+        text=True,
+        close_fds=True,
+        cwd=work_dir,
+    )
+    if USE_PROCESS_GROUP:
+        kwargs['start_new_session'] = True
+
+    # Pass json_file to the worker process
+    json_file = runtests.json_file
+    json_file.configure_subprocess(kwargs)
+
+    with json_file.inherit_subprocess():
+        return subprocess.Popen(cmd, **kwargs)
+
+
+def worker_process(worker_json: StrJSON) -> NoReturn:
+    runtests = RunTests.from_json(worker_json)
+    test_name = runtests.tests[0]
+    match_tests: FilterTuple | None = runtests.match_tests
+    json_file: JsonFile = runtests.json_file
+
+    setup_test_dir(runtests.test_dir)
+    setup_process()
+
+    if runtests.rerun:
+        if match_tests:
+            matching = "matching: " + ", ".join(match_tests)
+            print(f"Re-running {test_name} in verbose mode ({matching})", flush=True)
+        else:
+            print(f"Re-running {test_name} in verbose mode", flush=True)
+
+    result = run_single_test(test_name, runtests)
+
+    if json_file.file_type == JsonFileType.STDOUT:
+        print()
+        result.write_json_into(sys.stdout)
+    else:
+        with json_file.open('w', encoding='utf-8') as json_fp:
+            result.write_json_into(json_fp)
+
+    sys.exit(0)
+
+
+def main():
+    if len(sys.argv) != 2:
+        print("usage: python -m test.libregrtest.worker JSON")
+        sys.exit(1)
+    worker_json = sys.argv[1]
+
+    tmp_dir = get_temp_dir()
+    work_dir = get_work_dir(tmp_dir, worker=True)
+
+    with exit_timeout():
+        with os_helper.temp_cwd(work_dir, quiet=True):
+            worker_process(worker_json)
+
+
+if __name__ == "__main__":
+    main()
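
A hedged sketch (not part of this patch) of what create_worker_process() boils down to: run `python -m test.libregrtest.worker <json>` with unbuffered output and stderr merged into stdout. spawn_worker() below is an illustrative simplification, not the actual helper.

    import subprocess
    import sys

    def spawn_worker(worker_json: str, output_fd: int) -> subprocess.Popen:
        # worker_json is the string produced by RunTests.as_json()
        cmd = [sys.executable, '-u', '-m', 'test.libregrtest.worker', worker_json]
        # stderr is redirected into stdout so that message order is preserved
        return subprocess.Popen(cmd, stdout=output_fd, stderr=output_fd, text=True)
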
index c0b21124499df01e236e03aa11f9f1a3c0e31479..4f3ebb12ed957d01ed3a5e56c62e294d0433dbe2 100644 (file)
@@ -1,18 +1,13 @@
 """
 Collect various information about Python to help debugging test failures.
 """
-from __future__ import print_function
 import errno
 import re
 import sys
 import traceback
-import unittest
 import warnings
 
 
-MS_WINDOWS = (sys.platform == 'win32')
-
-
 def normalize_text(text):
     if text is None:
         return None
@@ -244,6 +239,7 @@ def collect_os(info_add):
         'getresgid',
         'getresuid',
         'getuid',
+        'process_cpu_count',
         'uname',
     ):
         call_func(info_add, 'os.%s' % func, os, func)
@@ -273,6 +269,7 @@ def collect_os(info_add):
         "ARCHFLAGS",
         "ARFLAGS",
         "AUDIODEV",
+        "BUILDPYTHON",
         "CC",
         "CFLAGS",
         "COLUMNS",
@@ -317,7 +314,6 @@ def collect_os(info_add):
         "TEMP",
         "TERM",
         "TILE_LIBRARY",
-        "TIX_LIBRARY",
         "TMP",
         "TMPDIR",
         "TRAVIS",
@@ -326,15 +322,24 @@ def collect_os(info_add):
         "VIRTUAL_ENV",
         "WAYLAND_DISPLAY",
         "WINDIR",
+        "_PYTHON_HOSTRUNNER",
         "_PYTHON_HOST_PLATFORM",
         "_PYTHON_PROJECT_BASE",
         "_PYTHON_SYSCONFIGDATA_NAME",
         "__PYVENV_LAUNCHER__",
+
+        # Sanitizer options
+        "ASAN_OPTIONS",
+        "LSAN_OPTIONS",
+        "MSAN_OPTIONS",
+        "TSAN_OPTIONS",
+        "UBSAN_OPTIONS",
     ))
     for name, value in os.environ.items():
         uname = name.upper()
         if (uname in ENV_VARS
-           # Copy PYTHON* and LC_* variables
+           # Copy PYTHON* variables like PYTHONPATH
+           # Copy LC_* variables like LC_ALL
            or uname.startswith(("PYTHON", "LC_"))
            # Visual Studio: VS140COMNTOOLS
            or (uname.startswith("VS") and uname.endswith("COMNTOOLS"))):
@@ -487,13 +492,10 @@ def collect_datetime(info_add):
 
 
 def collect_sysconfig(info_add):
-    # On Windows, sysconfig is not reliable to get macros used
-    # to build Python
-    if MS_WINDOWS:
-        return
-
     import sysconfig
 
+    info_add('sysconfig.is_python_build', sysconfig.is_python_build())
+
     for name in (
         'ABIFLAGS',
         'ANDROID_API_LEVEL',
@@ -502,6 +504,7 @@ def collect_sysconfig(info_add):
         'CFLAGS',
         'CFLAGSFORSHARED',
         'CONFIG_ARGS',
+        'HOSTRUNNER',
         'HOST_GNU_TYPE',
         'MACHDEP',
         'MULTIARCH',
@@ -514,11 +517,13 @@ def collect_sysconfig(info_add):
         'PY_STDMODULE_CFLAGS',
         'Py_DEBUG',
         'Py_ENABLE_SHARED',
+        'Py_NOGIL',
         'SHELL',
         'SOABI',
         'abs_builddir',
         'abs_srcdir',
         'prefix',
+        'srcdir',
     ):
         value = sysconfig.get_config_var(name)
         if name == 'ANDROID_API_LEVEL' and not value:
@@ -665,7 +670,29 @@ def collect_testcapi(info_add):
     except ImportError:
         return
 
-    call_func(info_add, 'pymem.allocator', _testcapi, 'pymem_getallocatorsname')
+    for name in (
+        'LONG_MAX',         # always 32-bit on Windows, 64-bit on 64-bit Unix
+        'PY_SSIZE_T_MAX',
+        'Py_C_RECURSION_LIMIT',
+        'SIZEOF_TIME_T',    # 32-bit or 64-bit depending on the platform
+        'SIZEOF_WCHAR_T',   # 16-bit or 32-bit depending on the platform
+    ):
+        copy_attr(info_add, f'_testcapi.{name}', _testcapi, name)
+
+
+def collect_testinternalcapi(info_add):
+    try:
+        import _testinternalcapi
+    except ImportError:
+        return
+
+    call_func(info_add, 'pymem.allocator', _testinternalcapi, 'pymem_getallocatorsname')
+
+    for name in (
+        'SIZEOF_PYGC_HEAD',
+        'SIZEOF_PYOBJECT',
+    ):
+        copy_attr(info_add, f'_testinternalcapi.{name}', _testinternalcapi, name)
 
 
 def collect_resource(info_add):
@@ -684,6 +711,7 @@ def collect_resource(info_add):
 
 
 def collect_test_socket(info_add):
+    import unittest
     try:
         from test import test_socket
     except (ImportError, unittest.SkipTest):
@@ -695,15 +723,12 @@ def collect_test_socket(info_add):
     copy_attributes(info_add, test_socket, 'test_socket.%s', attributes)
 
 
-def collect_test_support(info_add):
+def collect_support(info_add):
     try:
         from test import support
     except ImportError:
         return
 
-    attributes = ('IPV6_ENABLED',)
-    copy_attributes(info_add, support, 'test_support.%s', attributes)
-
     attributes = (
         'MS_WINDOWS',
         'has_fork_support',
@@ -717,17 +742,64 @@ def collect_test_support(info_add):
     )
     copy_attributes(info_add, support, 'support.%s', attributes)
 
-    call_func(info_add, 'test_support._is_gui_available', support, '_is_gui_available')
-    call_func(info_add, 'test_support.python_is_optimized', support, 'python_is_optimized')
+    call_func(info_add, 'support._is_gui_available', support, '_is_gui_available')
+    call_func(info_add, 'support.python_is_optimized', support, 'python_is_optimized')
 
-    info_add('test_support.check_sanitizer(address=True)',
+    info_add('support.check_sanitizer(address=True)',
              support.check_sanitizer(address=True))
-    info_add('test_support.check_sanitizer(memory=True)',
+    info_add('support.check_sanitizer(memory=True)',
              support.check_sanitizer(memory=True))
-    info_add('test_support.check_sanitizer(ub=True)',
+    info_add('support.check_sanitizer(ub=True)',
              support.check_sanitizer(ub=True))
 
 
+def collect_support_os_helper(info_add):
+    try:
+        from test.support import os_helper
+    except ImportError:
+        return
+
+    for name in (
+        'can_symlink',
+        'can_xattr',
+        'can_chmod',
+        'can_dac_override',
+    ):
+        func = getattr(os_helper, name)
+        info_add(f'support_os_helper.{name}', func())
+
+
+def collect_support_socket_helper(info_add):
+    try:
+        from test.support import socket_helper
+    except ImportError:
+        return
+
+    attributes = (
+        'IPV6_ENABLED',
+        'has_gethostname',
+    )
+    copy_attributes(info_add, socket_helper, 'support_socket_helper.%s', attributes)
+
+    for name in (
+        'tcp_blackhole',
+    ):
+        func = getattr(socket_helper, name)
+        info_add(f'support_socket_helper.{name}', func())
+
+
+def collect_support_threading_helper(info_add):
+    try:
+        from test.support import threading_helper
+    except ImportError:
+        return
+
+    attributes = (
+        'can_start_thread',
+    )
+    copy_attributes(info_add, threading_helper, 'support_threading_helper.%s', attributes)
+
+
 def collect_cc(info_add):
     import subprocess
     import sysconfig
@@ -882,6 +954,12 @@ def collect_fips(info_add):
         pass
 
 
+def collect_tempfile(info_add):
+    import tempfile
+
+    info_add('tempfile.gettempdir', tempfile.gettempdir())
+
+
 def collect_libregrtest_utils(info_add):
     try:
         from test.libregrtest import utils
@@ -924,6 +1002,8 @@ def collect_info(info):
         collect_sys,
         collect_sysconfig,
         collect_testcapi,
+        collect_testinternalcapi,
+        collect_tempfile,
         collect_time,
         collect_tkinter,
         collect_windows,
@@ -932,7 +1012,10 @@ def collect_info(info):
 
         # Collecting from tests should be last as they have side effects.
         collect_test_socket,
-        collect_test_support,
+        collect_support,
+        collect_support_os_helper,
+        collect_support_socket_helper,
+        collect_support_threading_helper,
     ):
         try:
             collect_func(info_add)
index 0ffb3ed454eda07eb1ac32e94ad33d213e661d31..46a74fe276f55307b3e368b073661547e46de1c7 100755 (executable)
@@ -8,7 +8,7 @@ Run this script with -h or --help for documentation.
 
 import os
 import sys
-from test.libregrtest import main
+from test.libregrtest.main import main
 
 
 # Alias for backward compatibility (just in case)
index 9d69ade251dcae42357359fb3959bb018fd3d478..37fd9443e18007100ae9983e9504522a1bcfa8c4 100644 (file)
@@ -433,6 +433,14 @@ def skip_if_sanitizer(reason=None, *, address=False, memory=False, ub=False):
 HAVE_ASAN_FORK_BUG = check_sanitizer(address=True)
 
 
+def set_sanitizer_env_var(env, option):
+    for name in ('ASAN_OPTIONS', 'MSAN_OPTIONS', 'UBSAN_OPTIONS'):
+        if name in env:
+            env[name] += f':{option}'
+        else:
+            env[name] = option
+
+
 def system_must_validate_cert(f):
     """Skip the test on TLS certificate validation failures."""
     @functools.wraps(f)
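
Usage sketch (not part of this patch) for the new support.set_sanitizer_env_var() helper, mirroring how test_faulthandler uses it below: the option is appended to each *SAN_OPTIONS variable, or the variable is created if it is missing.

    import os
    from test import support

    env = dict(os.environ)
    support.set_sanitizer_env_var(env, 'handle_segv=0')
    # ASAN_OPTIONS, MSAN_OPTIONS and UBSAN_OPTIONS now all end with 'handle_segv=0'
    print(env['ASAN_OPTIONS'])
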
index 67ebc554cddb7ce6bf09189ae92870263ff9f96e..d0473500a177357c3c088e3c304bcbbe3f519d04 100644 (file)
@@ -8,7 +8,6 @@ import subprocess
 import sys
 from test import support
 from test.support import os_helper, script_helper, is_android, MS_WINDOWS
-from test.support import skip_if_sanitizer
 import tempfile
 import unittest
 from textwrap import dedent
@@ -34,7 +33,7 @@ def expected_traceback(lineno1, lineno2, header, min_count=1):
         return '^' + regex + '$'
 
 def skip_segfault_on_android(test):
-    # Issue #32138: Raising SIGSEGV on Android may not cause a crash.
+    # gh-76319: Raising SIGSEGV on Android may not cause a crash.
     return unittest.skipIf(is_android,
                            'raising SIGSEGV on Android is unreliable')(test)
 
@@ -62,8 +61,16 @@ class FaultHandlerTests(unittest.TestCase):
         pass_fds = []
         if fd is not None:
             pass_fds.append(fd)
+        env = dict(os.environ)
+
+        # Sanitizers must not handle SIGSEGV (ex: for test_enable_fd())
+        option = 'handle_segv=0'
+        support.set_sanitizer_env_var(env, option)
+
         with support.SuppressCrashReport():
-            process = script_helper.spawn_python('-c', code, pass_fds=pass_fds)
+            process = script_helper.spawn_python('-c', code,
+                                                 pass_fds=pass_fds,
+                                                 env=env)
             with process:
                 output, stderr = process.communicate()
                 exitcode = process.wait()
@@ -302,8 +309,6 @@ class FaultHandlerTests(unittest.TestCase):
             3,
             'Segmentation fault')
 
-    @skip_if_sanitizer(memory=True, ub=True, reason="sanitizer "
-                       "builds change crashing process output.")
     @skip_segfault_on_android
     def test_enable_file(self):
         with temporary_filename() as filename:
@@ -319,8 +324,6 @@ class FaultHandlerTests(unittest.TestCase):
 
     @unittest.skipIf(sys.platform == "win32",
                      "subprocess doesn't support pass_fds on Windows")
-    @skip_if_sanitizer(memory=True, ub=True, reason="sanitizer "
-                       "builds change crashing process output.")
     @skip_segfault_on_android
     def test_enable_fd(self):
         with tempfile.TemporaryFile('wb+') as fp:
index a72c052cb83e0d03e1e4693b5ac77777c44b77c6..45573a2719c5437b10f0fed3bae56d781142549f 100644 (file)
@@ -11,18 +11,23 @@ import io
 import locale
 import os.path
 import platform
+import random
 import re
+import shlex
+import signal
 import subprocess
 import sys
 import sysconfig
 import tempfile
 import textwrap
 import unittest
-from test import libregrtest
 from test import support
 from test.support import os_helper, TestStats
-from test.libregrtest import utils, setup
-from test.libregrtest.runtest import normalize_test_name
+from test.libregrtest import cmdline
+from test.libregrtest import main
+from test.libregrtest import setup
+from test.libregrtest import utils
+from test.libregrtest.utils import normalize_test_name
 
 if not support.has_subprocess_support:
     raise unittest.SkipTest("test module requires subprocess")
@@ -52,9 +57,13 @@ class ParseArgsTestCase(unittest.TestCase):
     Test regrtest's argument parsing, function _parse_args().
     """
 
+    @staticmethod
+    def parse_args(args):
+        return cmdline._parse_args(args)
+
     def checkError(self, args, msg):
         with support.captured_stderr() as err, self.assertRaises(SystemExit):
-            libregrtest._parse_args(args)
+            self.parse_args(args)
         self.assertIn(msg, err.getvalue())
 
     def test_help(self):
@@ -62,83 +71,93 @@ class ParseArgsTestCase(unittest.TestCase):
             with self.subTest(opt=opt):
                 with support.captured_stdout() as out, \
                      self.assertRaises(SystemExit):
-                    libregrtest._parse_args([opt])
+                    self.parse_args([opt])
                 self.assertIn('Run Python regression tests.', out.getvalue())
 
     def test_timeout(self):
-        ns = libregrtest._parse_args(['--timeout', '4.2'])
+        ns = self.parse_args(['--timeout', '4.2'])
         self.assertEqual(ns.timeout, 4.2)
+
+        # negative, zero and empty string are treated as "no timeout"
+        for value in ('-1', '0', ''):
+            with self.subTest(value=value):
+                ns = self.parse_args([f'--timeout={value}'])
+                self.assertEqual(ns.timeout, None)
+
         self.checkError(['--timeout'], 'expected one argument')
-        self.checkError(['--timeout', 'foo'], 'invalid float value')
+        self.checkError(['--timeout', 'foo'], 'invalid timeout value:')
 
     def test_wait(self):
-        ns = libregrtest._parse_args(['--wait'])
+        ns = self.parse_args(['--wait'])
         self.assertTrue(ns.wait)
 
-    def test_worker_args(self):
-        ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
-        self.assertEqual(ns.worker_args, '[[], {}]')
-        self.checkError(['--worker-args'], 'expected one argument')
-
     def test_start(self):
         for opt in '-S', '--start':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, 'foo'])
+                ns = self.parse_args([opt, 'foo'])
                 self.assertEqual(ns.start, 'foo')
                 self.checkError([opt], 'expected one argument')
 
     def test_verbose(self):
-        ns = libregrtest._parse_args(['-v'])
+        ns = self.parse_args(['-v'])
         self.assertEqual(ns.verbose, 1)
-        ns = libregrtest._parse_args(['-vvv'])
+        ns = self.parse_args(['-vvv'])
         self.assertEqual(ns.verbose, 3)
-        ns = libregrtest._parse_args(['--verbose'])
+        ns = self.parse_args(['--verbose'])
         self.assertEqual(ns.verbose, 1)
-        ns = libregrtest._parse_args(['--verbose'] * 3)
+        ns = self.parse_args(['--verbose'] * 3)
         self.assertEqual(ns.verbose, 3)
-        ns = libregrtest._parse_args([])
+        ns = self.parse_args([])
         self.assertEqual(ns.verbose, 0)
 
     def test_rerun(self):
         for opt in '-w', '--rerun', '--verbose2':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.rerun)
 
     def test_verbose3(self):
         for opt in '-W', '--verbose3':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.verbose3)
 
     def test_quiet(self):
         for opt in '-q', '--quiet':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.quiet)
                 self.assertEqual(ns.verbose, 0)
 
     def test_slowest(self):
         for opt in '-o', '--slowest':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.print_slow)
 
     def test_header(self):
-        ns = libregrtest._parse_args(['--header'])
+        ns = self.parse_args(['--header'])
         self.assertTrue(ns.header)
 
-        ns = libregrtest._parse_args(['--verbose'])
+        ns = self.parse_args(['--verbose'])
         self.assertTrue(ns.header)
 
     def test_randomize(self):
         for opt in '-r', '--randomize':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.randomize)
 
+        with os_helper.EnvironmentVarGuard() as env:
+            env['SOURCE_DATE_EPOCH'] = '1'
+
+            ns = self.parse_args(['--randomize'])
+            regrtest = main.Regrtest(ns)
+            self.assertFalse(regrtest.randomize)
+            self.assertIsNone(regrtest.random_seed)
+
     def test_randseed(self):
-        ns = libregrtest._parse_args(['--randseed', '12345'])
+        ns = self.parse_args(['--randseed', '12345'])
         self.assertEqual(ns.random_seed, 12345)
         self.assertTrue(ns.randomize)
         self.checkError(['--randseed'], 'expected one argument')
@@ -147,7 +166,7 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_fromfile(self):
         for opt in '-f', '--fromfile':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, 'foo'])
+                ns = self.parse_args([opt, 'foo'])
                 self.assertEqual(ns.fromfile, 'foo')
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo', '-s'], "don't go together")
@@ -155,20 +174,20 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_exclude(self):
         for opt in '-x', '--exclude':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.exclude)
 
     def test_single(self):
         for opt in '-s', '--single':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.single)
                 self.checkError([opt, '-f', 'foo'], "don't go together")
 
     def test_ignore(self):
         for opt in '-i', '--ignore':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, 'pattern'])
+                ns = self.parse_args([opt, 'pattern'])
                 self.assertEqual(ns.ignore_tests, ['pattern'])
                 self.checkError([opt], 'expected one argument')
 
@@ -178,7 +197,7 @@ class ParseArgsTestCase(unittest.TestCase):
             print('matchfile2', file=fp)
 
         filename = os.path.abspath(os_helper.TESTFN)
-        ns = libregrtest._parse_args(['-m', 'match',
+        ns = self.parse_args(['-m', 'match',
                                       '--ignorefile', filename])
         self.assertEqual(ns.ignore_tests,
                          ['matchfile1', 'matchfile2'])
@@ -186,11 +205,11 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_match(self):
         for opt in '-m', '--match':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, 'pattern'])
+                ns = self.parse_args([opt, 'pattern'])
                 self.assertEqual(ns.match_tests, ['pattern'])
                 self.checkError([opt], 'expected one argument')
 
-        ns = libregrtest._parse_args(['-m', 'pattern1',
+        ns = self.parse_args(['-m', 'pattern1',
                                       '-m', 'pattern2'])
         self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])
 
@@ -200,7 +219,7 @@ class ParseArgsTestCase(unittest.TestCase):
             print('matchfile2', file=fp)
 
         filename = os.path.abspath(os_helper.TESTFN)
-        ns = libregrtest._parse_args(['-m', 'match',
+        ns = self.parse_args(['-m', 'match',
                                       '--matchfile', filename])
         self.assertEqual(ns.match_tests,
                          ['match', 'matchfile1', 'matchfile2'])
@@ -208,65 +227,65 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_failfast(self):
         for opt in '-G', '--failfast':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, '-v'])
+                ns = self.parse_args([opt, '-v'])
                 self.assertTrue(ns.failfast)
-                ns = libregrtest._parse_args([opt, '-W'])
+                ns = self.parse_args([opt, '-W'])
                 self.assertTrue(ns.failfast)
                 self.checkError([opt], '-G/--failfast needs either -v or -W')
 
     def test_use(self):
         for opt in '-u', '--use':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, 'gui,network'])
+                ns = self.parse_args([opt, 'gui,network'])
                 self.assertEqual(ns.use_resources, ['gui', 'network'])
 
-                ns = libregrtest._parse_args([opt, 'gui,none,network'])
+                ns = self.parse_args([opt, 'gui,none,network'])
                 self.assertEqual(ns.use_resources, ['network'])
 
-                expected = list(libregrtest.ALL_RESOURCES)
+                expected = list(cmdline.ALL_RESOURCES)
                 expected.remove('gui')
-                ns = libregrtest._parse_args([opt, 'all,-gui'])
+                ns = self.parse_args([opt, 'all,-gui'])
                 self.assertEqual(ns.use_resources, expected)
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo'], 'invalid resource')
 
                 # all + a resource not part of "all"
-                ns = libregrtest._parse_args([opt, 'all,tzdata'])
+                ns = self.parse_args([opt, 'all,tzdata'])
                 self.assertEqual(ns.use_resources,
-                                 list(libregrtest.ALL_RESOURCES) + ['tzdata'])
+                                 list(cmdline.ALL_RESOURCES) + ['tzdata'])
 
                 # test another resource which is not part of "all"
-                ns = libregrtest._parse_args([opt, 'extralargefile'])
+                ns = self.parse_args([opt, 'extralargefile'])
                 self.assertEqual(ns.use_resources, ['extralargefile'])
 
     def test_memlimit(self):
         for opt in '-M', '--memlimit':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, '4G'])
+                ns = self.parse_args([opt, '4G'])
                 self.assertEqual(ns.memlimit, '4G')
                 self.checkError([opt], 'expected one argument')
 
     def test_testdir(self):
-        ns = libregrtest._parse_args(['--testdir', 'foo'])
+        ns = self.parse_args(['--testdir', 'foo'])
         self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
         self.checkError(['--testdir'], 'expected one argument')
 
     def test_runleaks(self):
         for opt in '-L', '--runleaks':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.runleaks)
 
     def test_huntrleaks(self):
         for opt in '-R', '--huntrleaks':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, ':'])
+                ns = self.parse_args([opt, ':'])
                 self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
-                ns = libregrtest._parse_args([opt, '6:'])
+                ns = self.parse_args([opt, '6:'])
                 self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
-                ns = libregrtest._parse_args([opt, ':3'])
+                ns = self.parse_args([opt, ':3'])
                 self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
-                ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
+                ns = self.parse_args([opt, '6:3:leaks.log'])
                 self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, '6'],
@@ -277,7 +296,7 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_multiprocess(self):
         for opt in '-j', '--multiprocess':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, '2'])
+                ns = self.parse_args([opt, '2'])
                 self.assertEqual(ns.use_mp, 2)
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo'], 'invalid int value')
@@ -287,13 +306,13 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_coverage(self):
         for opt in '-T', '--coverage':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.trace)
 
     def test_coverdir(self):
         for opt in '-D', '--coverdir':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, 'foo'])
+                ns = self.parse_args([opt, 'foo'])
                 self.assertEqual(ns.coverdir,
                                  os.path.join(os_helper.SAVEDCWD, 'foo'))
                 self.checkError([opt], 'expected one argument')
@@ -301,13 +320,13 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_nocoverdir(self):
         for opt in '-N', '--nocoverdir':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertIsNone(ns.coverdir)
 
     def test_threshold(self):
         for opt in '-t', '--threshold':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt, '1000'])
+                ns = self.parse_args([opt, '1000'])
                 self.assertEqual(ns.threshold, 1000)
                 self.checkError([opt], 'expected one argument')
                 self.checkError([opt, 'foo'], 'invalid int value')
@@ -316,7 +335,7 @@ class ParseArgsTestCase(unittest.TestCase):
         for opt in '-n', '--nowindows':
             with self.subTest(opt=opt):
                 with contextlib.redirect_stderr(io.StringIO()) as stderr:
-                    ns = libregrtest._parse_args([opt])
+                    ns = self.parse_args([opt])
                 self.assertTrue(ns.nowindows)
                 err = stderr.getvalue()
                 self.assertIn('the --nowindows (-n) option is deprecated', err)
@@ -324,39 +343,39 @@ class ParseArgsTestCase(unittest.TestCase):
     def test_forever(self):
         for opt in '-F', '--forever':
             with self.subTest(opt=opt):
-                ns = libregrtest._parse_args([opt])
+                ns = self.parse_args([opt])
                 self.assertTrue(ns.forever)
 
     def test_unrecognized_argument(self):
         self.checkError(['--xxx'], 'usage:')
 
     def test_long_option__partial(self):
-        ns = libregrtest._parse_args(['--qui'])
+        ns = self.parse_args(['--qui'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
 
     def test_two_options(self):
-        ns = libregrtest._parse_args(['--quiet', '--exclude'])
+        ns = self.parse_args(['--quiet', '--exclude'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
         self.assertTrue(ns.exclude)
 
     def test_option_with_empty_string_value(self):
-        ns = libregrtest._parse_args(['--start', ''])
+        ns = self.parse_args(['--start', ''])
         self.assertEqual(ns.start, '')
 
     def test_arg(self):
-        ns = libregrtest._parse_args(['foo'])
+        ns = self.parse_args(['foo'])
         self.assertEqual(ns.args, ['foo'])
 
     def test_option_and_arg(self):
-        ns = libregrtest._parse_args(['--quiet', 'foo'])
+        ns = self.parse_args(['--quiet', 'foo'])
         self.assertTrue(ns.quiet)
         self.assertEqual(ns.verbose, 0)
         self.assertEqual(ns.args, ['foo'])
 
     def test_arg_option_arg(self):
-        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
+        ns = self.parse_args(['test_unaryop', '-v', 'test_binop'])
         self.assertEqual(ns.verbose, 1)
         self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
 
@@ -364,6 +383,55 @@ class ParseArgsTestCase(unittest.TestCase):
         self.checkError(['--unknown-option'],
                         'unrecognized arguments: --unknown-option')
 
+    def check_ci_mode(self, args, use_resources, rerun=True):
+        ns = cmdline._parse_args(args)
+
+        # Check the Regrtest attributes, which are more reliable than the
+        # Namespace object, whose API is unclear.
+        regrtest = main.Regrtest(ns)
+        self.assertEqual(regrtest.num_workers, -1)
+        self.assertEqual(regrtest.want_rerun, rerun)
+        self.assertTrue(regrtest.randomize)
+        self.assertIsInstance(regrtest.random_seed, int)
+        self.assertTrue(regrtest.fail_env_changed)
+        self.assertTrue(regrtest.fail_rerun)
+        self.assertTrue(regrtest.print_slowest)
+        self.assertTrue(regrtest.output_on_failure)
+        self.assertEqual(sorted(regrtest.use_resources), sorted(use_resources))
+        return regrtest
+
+    def test_fast_ci(self):
+        args = ['--fast-ci']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        use_resources.remove('cpu')
+        regrtest = self.check_ci_mode(args, use_resources)
+        self.assertEqual(regrtest.timeout, 10 * 60)
+
+    def test_fast_ci_python_cmd(self):
+        args = ['--fast-ci', '--python', 'python -X dev']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        use_resources.remove('cpu')
+        regrtest = self.check_ci_mode(args, use_resources, rerun=False)
+        self.assertEqual(regrtest.timeout, 10 * 60)
+        self.assertEqual(regrtest.python_cmd, ('python', '-X', 'dev'))
+
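# Side note: test_fast_ci_python_cmd() above expects the "--python" string to
# come back as the tuple ('python', '-X', 'dev'); as test_python_command()
# later in this file notes, the command string is parsed with shlex.split().
# A quick illustration (plain Python, not libregrtest code):
import shlex
assert tuple(shlex.split('python -X dev')) == ('python', '-X', 'dev')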
+    def test_fast_ci_resource(self):
+        # it should be possible to override resources
+        args = ['--fast-ci', '-u', 'network']
+        use_resources = ['network']
+        self.check_ci_mode(args, use_resources)
+
+    def test_slow_ci(self):
+        args = ['--slow-ci']
+        use_resources = sorted(cmdline.ALL_RESOURCES)
+        regrtest = self.check_ci_mode(args, use_resources)
+        self.assertEqual(regrtest.timeout, 20 * 60)
+
+    def test_dont_add_python_opts(self):
+        args = ['--dont-add-python-opts']
+        ns = cmdline._parse_args(args)
+        self.assertFalse(ns._add_python_opts)
+
 
 @dataclasses.dataclass(slots=True)
 class Rerun:
@@ -419,10 +487,12 @@ class BaseTestCase(unittest.TestCase):
             self.fail("%r not found in %r" % (regex, output))
         return match
 
-    def check_line(self, output, regex, full=False):
+    def check_line(self, output, pattern, full=False, regex=True):
+        if not regex:
+            pattern = re.escape(pattern)
         if full:
-            regex += '\n'
-        regex = re.compile(r'^' + regex, re.MULTILINE)
+            pattern += '\n'
+        regex = re.compile(r'^' + pattern, re.MULTILINE)
         self.assertRegex(output, regex)
 
     def parse_executed_tests(self, output):
@@ -431,13 +501,14 @@ class BaseTestCase(unittest.TestCase):
         parser = re.finditer(regex, output, re.MULTILINE)
         return list(match.group(1) for match in parser)
 
-    def check_executed_tests(self, output, tests, skipped=(), failed=(),
+    def check_executed_tests(self, output, tests, *, stats,
+                             skipped=(), failed=(),
                              env_changed=(), omitted=(),
                              rerun=None, run_no_tests=(),
                              resource_denied=(),
-                             randomize=False, interrupted=False,
+                             randomize=False, parallel=False, interrupted=False,
                              fail_env_changed=False,
-                             *, stats, forever=False, filtered=False):
+                             forever=False, filtered=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
@@ -454,9 +525,11 @@ class BaseTestCase(unittest.TestCase):
             run_no_tests = [run_no_tests]
         if isinstance(stats, int):
             stats = TestStats(stats)
+        if parallel:
+            randomize = True
 
         rerun_failed = []
-        if rerun is not None:
+        if rerun is not None and not env_changed:
             failed = [rerun.name]
             if not rerun.success:
                 rerun_failed.append(rerun.name)
@@ -493,7 +566,8 @@ class BaseTestCase(unittest.TestCase):
             self.check_line(output, regex)
 
         if env_changed:
-            regex = list_regex('%s test%s altered the execution environment',
+            regex = list_regex(r'%s test%s altered the execution environment '
+                               r'\(env changed\)',
                                env_changed)
             self.check_line(output, regex)
 
@@ -504,7 +578,7 @@ class BaseTestCase(unittest.TestCase):
         if rerun is not None:
             regex = list_regex('%s re-run test%s', [rerun.name])
             self.check_line(output, regex)
-            regex = LOG_PREFIX + fr"Re-running 1 failed tests in verbose mode"
+            regex = LOG_PREFIX + r"Re-running 1 failed tests in verbose mode"
             self.check_line(output, regex)
             regex = fr"Re-running {rerun.name} in verbose mode"
             if rerun.match:
@@ -583,13 +657,13 @@ class BaseTestCase(unittest.TestCase):
         state = ', '.join(state)
         if rerun is not None:
             new_state = 'SUCCESS' if rerun.success else 'FAILURE'
-            state = 'FAILURE then ' + new_state
+            state = f'{state} then {new_state}'
         self.check_line(output, f'Result: {state}', full=True)
 
     def parse_random_seed(self, output):
         match = self.regex_search(r'Using random seed ([0-9]+)', output)
         randseed = int(match.group(1))
-        self.assertTrue(0 <= randseed <= 10000000, randseed)
+        self.assertTrue(0 <= randseed, randseed)
         return randseed
 
     def run_command(self, args, input=None, exitcode=0, **kw):
@@ -598,7 +672,7 @@ class BaseTestCase(unittest.TestCase):
         if 'stderr' not in kw:
             kw['stderr'] = subprocess.STDOUT
         proc = subprocess.run(args,
-                              universal_newlines=True,
+                              text=True,
                               input=input,
                               stdout=subprocess.PIPE,
                               **kw)
@@ -621,7 +695,11 @@ class BaseTestCase(unittest.TestCase):
         return proc
 
     def run_python(self, args, **kw):
-        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
+        extraargs = []
+        if 'uops' in sys._xoptions:
+            # Pass -X uops along
+            extraargs.extend(['-X', 'uops'])
+        args = [sys.executable, *extraargs, '-X', 'faulthandler', '-I', *args]
         proc = self.run_command(args, **kw)
         return proc.stdout
 
@@ -676,8 +754,8 @@ class ProgramsTestCase(BaseTestCase):
         self.check_executed_tests(output, self.tests,
                                   randomize=True, stats=len(self.tests))
 
-    def run_tests(self, args):
-        output = self.run_python(args)
+    def run_tests(self, args, env=None):
+        output = self.run_python(args, env=env)
         self.check_output(output)
 
     def test_script_regrtest(self):
@@ -718,14 +796,6 @@ class ProgramsTestCase(BaseTestCase):
         args = [*self.python_args, script, *self.regrtest_args, *self.tests]
         self.run_tests(args)
 
-    @unittest.skipUnless(sysconfig.is_python_build(),
-                         'run_tests.py script is not installed')
-    def test_tools_script_run_tests(self):
-        # Tools/scripts/run_tests.py
-        script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
-        args = [script, *self.regrtest_args, *self.tests]
-        self.run_tests(args)
-
     def run_batch(self, *args):
         proc = self.run_command(args)
         self.check_output(proc.stdout)
@@ -880,6 +950,10 @@ class ArgsTestCase(BaseTestCase):
         test_random2 = int(match.group(1))
         self.assertEqual(test_random2, test_random)
 
+        # check that random.seed is used by default
+        output = self.run_tests(test, exitcode=EXITCODE_NO_TESTS_RAN)
+        self.assertIsInstance(self.parse_random_seed(output), int)
+
     def test_fromfile(self):
         # test --fromfile
         tests = [self.create_test() for index in range(5)]
@@ -1014,12 +1088,16 @@ class ArgsTestCase(BaseTestCase):
                                   stats=TestStats(4, 1),
                                   forever=True)
 
-    def check_leak(self, code, what):
+    def check_leak(self, code, what, *, run_workers=False):
         test = self.create_test('huntrleaks', code=code)
 
         filename = 'reflog.txt'
         self.addCleanup(os_helper.unlink, filename)
-        output = self.run_tests('--huntrleaks', '3:3:', test,
+        cmd = ['--huntrleaks', '3:3:']
+        if run_workers:
+            cmd.append('-j1')
+        cmd.append(test)
+        output = self.run_tests(*cmd,
                                 exitcode=EXITCODE_BAD_TEST,
                                 stderr=subprocess.STDOUT)
         self.check_executed_tests(output, [test], failed=test, stats=1)
@@ -1035,7 +1113,7 @@ class ArgsTestCase(BaseTestCase):
             self.assertIn(line2, reflog)
 
     @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
-    def test_huntrleaks(self):
+    def check_huntrleaks(self, *, run_workers: bool):
         # test --huntrleaks
         code = textwrap.dedent("""
             import unittest
@@ -1046,7 +1124,13 @@ class ArgsTestCase(BaseTestCase):
                 def test_leak(self):
                     GLOBAL_LIST.append(object())
         """)
-        self.check_leak(code, 'references')
+        self.check_leak(code, 'references', run_workers=run_workers)
+
+    def test_huntrleaks(self):
+        self.check_huntrleaks(run_workers=False)
+
+    def test_huntrleaks_mp(self):
+        self.check_huntrleaks(run_workers=True)
 
     @unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
     def test_huntrleaks_fd_leak(self):
@@ -1104,7 +1188,7 @@ class ArgsTestCase(BaseTestCase):
         tests = [crash_test]
         output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, tests, failed=crash_test,
-                                  randomize=True, stats=0)
+                                  parallel=True, stats=0)
 
     def parse_methods(self, output):
         regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
@@ -1124,8 +1208,6 @@ class ArgsTestCase(BaseTestCase):
                 def test_method4(self):
                     pass
         """)
-        all_methods = ['test_method1', 'test_method2',
-                       'test_method3', 'test_method4']
         testname = self.create_test(code=code)
 
         # only run a subset
@@ -1208,6 +1290,15 @@ class ArgsTestCase(BaseTestCase):
         self.check_executed_tests(output, [testname], env_changed=testname,
                                   fail_env_changed=True, stats=1)
 
+        # rerun
+        output = self.run_tests("--rerun", testname)
+        self.check_executed_tests(output, [testname],
+                                  env_changed=testname,
+                                  rerun=Rerun(testname,
+                                              match=None,
+                                              success=True),
+                                  stats=2)
+
     def test_rerun_fail(self):
         # FAILURE then FAILURE
         code = textwrap.dedent("""
@@ -1730,16 +1821,15 @@ class ArgsTestCase(BaseTestCase):
         self.check_executed_tests(output, testnames,
                                   env_changed=testnames,
                                   fail_env_changed=True,
-                                  randomize=True,
+                                  parallel=True,
                                   stats=len(testnames))
         for testname in testnames:
             self.assertIn(f"Warning -- {testname} leaked temporary "
                           f"files (1): mytmpfile",
                           output)
 
-    def test_mp_decode_error(self):
-        # gh-101634: If a worker stdout cannot be decoded, report a failed test
-        # and a non-zero exit code.
+    def test_worker_decode_error(self):
+        # gh-109425: Use "backslashreplace" error handler to decode stdout.
         if sys.platform == 'win32':
             encoding = locale.getencoding()
         else:
@@ -1747,34 +1837,46 @@ class ArgsTestCase(BaseTestCase):
             if encoding is None:
                 encoding = sys.__stdout__.encoding
                 if encoding is None:
-                    self.skipTest(f"cannot get regrtest worker encoding")
-
-        nonascii = b"byte:\xa0\xa9\xff\n"
+                    self.skipTest("cannot get regrtest worker encoding")
+
+        nonascii = bytes(ch for ch in range(128, 256))
+        corrupted_output = b"nonascii:%s\n" % (nonascii,)
+        # gh-108989: On Windows, assertion errors are written in UTF-16: when
+        # decoded, each letter is followed by a NUL character.
+        assertion_failed = 'Assertion failed: tstate_is_alive(tstate)\n'
+        corrupted_output += assertion_failed.encode('utf-16-le')
         try:
-            nonascii.decode(encoding)
+            corrupted_output.decode(encoding)
         except UnicodeDecodeError:
             pass
         else:
-            self.skipTest(f"{encoding} can decode non-ASCII bytes {nonascii!a}")
+            self.skipTest(f"{encoding} can decode non-ASCII bytes")
+
+        expected_line = corrupted_output.decode(encoding, 'backslashreplace')
 
         code = textwrap.dedent(fr"""
             import sys
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_pass(self):
+                    pass
+
             # bytes which cannot be decoded from UTF-8
-            nonascii = {nonascii!a}
-            sys.stdout.buffer.write(nonascii)
+            corrupted_output = {corrupted_output!a}
+            sys.stdout.buffer.write(corrupted_output)
             sys.stdout.buffer.flush()
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
-                                exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--fail-env-changed", "-v", "-j1", testname)
         self.check_executed_tests(output, [testname],
-                                  failed=[testname],
-                                  randomize=True,
-                                  stats=0)
+                                  parallel=True,
+                                  stats=1)
+        self.check_line(output, expected_line, regex=False)
 
     def test_doctest(self):
-        code = textwrap.dedent(fr'''
+        code = textwrap.dedent(r'''
             import doctest
             import sys
             from test import support
@@ -1809,9 +1911,169 @@ class ArgsTestCase(BaseTestCase):
                                 exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
                                   failed=[testname],
-                                  randomize=True,
+                                  parallel=True,
                                   stats=TestStats(1, 1, 0))
 
+    def _check_random_seed(self, run_workers: bool):
+        # gh-109276: When -r/--randomize is used, random.seed() is called
+        # with the same random seed before running each test file.
+        code = textwrap.dedent(r'''
+            import random
+            import unittest
+
+            class RandomSeedTest(unittest.TestCase):
+                def test_randint(self):
+                    numbers = [random.randint(0, 1000) for _ in range(10)]
+                    print(f"Random numbers: {numbers}")
+        ''')
+        tests = [self.create_test(name=f'test_random{i}', code=code)
+                 for i in range(1, 3+1)]
+
+        random_seed = 856_656_202
+        cmd = ["--randomize", f"--randseed={random_seed}"]
+        if run_workers:
+            # run as many worker processes as there are tests
+            cmd.append(f'-j{len(tests)}')
+        cmd.extend(tests)
+        output = self.run_tests(*cmd)
+
+        random.seed(random_seed)
+        # Assume that nothing consumes entropy between libregrtest's
+        # setup_tests(), which calls random.seed(), and RandomSeedTest calling
+        # random.randint().
+        numbers = [random.randint(0, 1000) for _ in range(10)]
+        expected = f"Random numbers: {numbers}"
+
+        regex = r'^Random numbers: .*$'
+        matches = re.findall(regex, output, flags=re.MULTILINE)
+        self.assertEqual(matches, [expected] * len(tests))
+
+    def test_random_seed(self):
+        self._check_random_seed(run_workers=False)
+
+    def test_random_seed_workers(self):
+        self._check_random_seed(run_workers=True)
+
+    def test_python_command(self):
+        code = textwrap.dedent(r"""
+            import sys
+            import unittest
+
+            class WorkerTests(unittest.TestCase):
+                def test_dev_mode(self):
+                    self.assertTrue(sys.flags.dev_mode)
+        """)
+        tests = [self.create_test(code=code) for _ in range(3)]
+
+        # Custom Python command: "python -X dev"
+        python_cmd = [sys.executable, '-X', 'dev']
+        # test.libregrtest.cmdline uses shlex.split() to parse the Python
+        # command line string
+        python_cmd = shlex.join(python_cmd)
+
+        output = self.run_tests("--python", python_cmd, "-j0", *tests)
+        self.check_executed_tests(output, tests,
+                                  stats=len(tests), parallel=True)
+
+    def check_add_python_opts(self, option):
+        # --fast-ci and --slow-ci add "-u -W default -bb -E" options to Python
+        code = textwrap.dedent(r"""
+            import sys
+            import unittest
+            from test import support
+            try:
+                from _testinternalcapi import get_config
+            except ImportError:
+                get_config = None
+
+            # WASI/WASM buildbots don't use -E option
+            use_environment = (support.is_emscripten or support.is_wasi)
+
+            class WorkerTests(unittest.TestCase):
+                @unittest.skipUnless(get_config is not None, 'need get_config()')
+                def test_config(self):
+                    config = get_config()['config']
+                    # -u option
+                    self.assertEqual(config['buffered_stdio'], 0)
+                    # -W default option
+                    self.assertEqual(config['warnoptions'], ['default'])
+                    # -bb option
+                    self.assertEqual(config['bytes_warning'], 2)
+                    # -E option
+                    self.assertEqual(config['use_environment'], use_environment)
+
+                def test_python_opts(self):
+                    # -u option
+                    self.assertTrue(sys.__stdout__.write_through)
+                    self.assertTrue(sys.__stderr__.write_through)
+
+                    # -W default option
+                    self.assertEqual(sys.warnoptions, ['default'])
+
+                    # -bb option
+                    self.assertEqual(sys.flags.bytes_warning, 2)
+
+                    # -E option
+                    self.assertEqual(not sys.flags.ignore_environment,
+                                     use_environment)
+        """)
+        testname = self.create_test(code=code)
+
+        # Use subprocess directly to control the exact command line
+        cmd = [sys.executable,
+               "-m", "test", option,
+               f'--testdir={self.tmptestdir}',
+               testname]
+        proc = subprocess.run(cmd,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.STDOUT,
+                              text=True)
+        self.assertEqual(proc.returncode, 0, proc)
+
+    def test_add_python_opts(self):
+        for opt in ("--fast-ci", "--slow-ci"):
+            with self.subTest(opt=opt):
+                self.check_add_python_opts(opt)
+
+    # gh-76319: Raising SIGSEGV on Android may not cause a crash.
+    @unittest.skipIf(support.is_android,
+                     'raising SIGSEGV on Android is unreliable')
+    def test_worker_output_on_failure(self):
+        try:
+            from faulthandler import _sigsegv
+        except ImportError:
+            self.skipTest("need faulthandler._sigsegv")
+
+        code = textwrap.dedent(r"""
+            import faulthandler
+            import unittest
+            from test import support
+
+            class CrashTests(unittest.TestCase):
+                def test_crash(self):
+                    print("just before crash!", flush=True)
+
+                    with support.SuppressCrashReport():
+                        faulthandler._sigsegv(True)
+        """)
+        testname = self.create_test(code=code)
+
+        # Sanitizers must not handle SIGSEGV (e.g. for test_enable_fd())
+        env = dict(os.environ)
+        option = 'handle_segv=0'
+        support.set_sanitizer_env_var(env, option)
+
+        output = self.run_tests("-j1", testname,
+                                exitcode=EXITCODE_BAD_TEST,
+                                env=env)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  stats=0, parallel=True)
+        if not support.MS_WINDOWS:
+            exitcode = -int(signal.SIGSEGV)
+            self.assertIn(f"Exit code {exitcode} (SIGSEGV)", output)
+        self.check_line(output, "just before crash!", full=True, regex=False)
+
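# The test above disables the sanitizers' SIGSEGV handler through
# support.set_sanitizer_env_var(). A minimal sketch of such a helper, assuming
# it only needs to append an option such as "handle_segv=0" to the usual
# *SAN_OPTIONS variables; the real test.support implementation may differ.
def set_sanitizer_env_var_sketch(env, option):
    # Append the option to each sanitizer options variable, creating the
    # variable when it is not already set in the environment dict.
    for name in ('ASAN_OPTIONS', 'MSAN_OPTIONS', 'TSAN_OPTIONS', 'UBSAN_OPTIONS'):
        if name in env:
            env[name] += f':{option}'
        else:
            env[name] = option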
 
 class TestUtils(unittest.TestCase):
     def test_format_duration(self):
@@ -1847,6 +2109,35 @@ class TestUtils(unittest.TestCase):
         self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True))
         self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True))
 
+    def test_get_signal_name(self):
+        for exitcode, expected in (
+            (-int(signal.SIGINT), 'SIGINT'),
+            (-int(signal.SIGSEGV), 'SIGSEGV'),
+            (3221225477, "STATUS_ACCESS_VIOLATION"),
+            (0xC00000FD, "STATUS_STACK_OVERFLOW"),
+        ):
+            self.assertEqual(utils.get_signal_name(exitcode), expected, exitcode)
+
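# A rough sketch of a lookup consistent with the cases checked in
# test_get_signal_name() above; the name get_signal_name_sketch() and the
# NTSTATUS table below are illustrative assumptions, not the actual
# libregrtest.utils code.
import signal

_NTSTATUS_NAMES = {
    0xC0000005: "STATUS_ACCESS_VIOLATION",   # 3221225477
    0xC00000FD: "STATUS_STACK_OVERFLOW",
}

def get_signal_name_sketch(exitcode):
    # POSIX: a child killed by signal N reports the exit code -N.
    if exitcode < 0:
        try:
            return signal.Signals(-exitcode).name
        except ValueError:
            return None
    # Windows: the exit code is an unsigned 32-bit NTSTATUS value.
    return _NTSTATUS_NAMES.get(exitcode & 0xFFFF_FFFF)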
+    def test_format_resources(self):
+        format_resources = utils.format_resources
+        ALL_RESOURCES = utils.ALL_RESOURCES
+        self.assertEqual(
+            format_resources(("network",)),
+            'resources (1): network')
+        self.assertEqual(
+            format_resources(("audio", "decimal", "network")),
+            'resources (3): audio,decimal,network')
+        self.assertEqual(
+            format_resources(ALL_RESOURCES),
+            'resources: all')
+        self.assertEqual(
+            format_resources(tuple(name for name in ALL_RESOURCES
+                                   if name != "cpu")),
+            'resources: all,-cpu')
+        self.assertEqual(
+            format_resources((*ALL_RESOURCES, "tzdata")),
+            'resources: all,tzdata')
+
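# A minimal sketch reproducing the behaviour asserted above; it is derived from
# the test cases only, so the heuristic and the explicit all_resources
# parameter are assumptions, not the real libregrtest.utils.format_resources.
def format_resources_sketch(use_resources, all_resources):
    use = set(use_resources)
    known = set(all_resources)
    if known.issubset(use):
        # Every "all" resource is enabled: render "all" plus any extras, such
        # as tzdata, which are not part of "all".
        extras = sorted(use - known)
        return 'resources: ' + ','.join(['all', *extras])
    missing = sorted(known - use)
    if len(missing) <= 2 and not (use - known):
        # Almost "all": show the few disabled resources as "-name".
        return 'resources: ' + ','.join(['all', *(f'-{name}' for name in missing)])
    names = sorted(use)
    return f'resources ({len(names)}): ' + ','.join(names)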
 
 if __name__ == '__main__':
     unittest.main()