executing :program:`rt.bat` from your :file:`PCbuild` directory will run all
regression tests.
+.. versionadded:: 3.14
+ Output is colorized by default and can be
+ :ref:`controlled using environment variables <using-on-controlling-color>`.
+
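As a quick sanity check of the environment-variable control mentioned above (a sketch, assuming the private ``_colorize`` helper used throughout this patch consults ``PYTHON_COLORS``, ``NO_COLOR`` and ``FORCE_COLOR`` at call time, with ``NO_COLOR`` taking precedence over ``FORCE_COLOR``)::

    import os

    import _colorize  # private helper, not a public API

    # Assumption: any non-empty NO_COLOR disables color regardless of TTY.
    os.environ["NO_COLOR"] = "1"
    print(_colorize.can_colorize())  # False: escape codes are suppressed

    # Assumption: FORCE_COLOR enables color even when stdout is not a TTY.
    del os.environ["NO_COLOR"]
    os.environ["FORCE_COLOR"] = "1"
    print(_colorize.can_colorize())  # True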
:mod:`test.support` --- Utilities for the Python test suite
===========================================================
import sysconfig
import time
import trace
+from _colorize import get_colors # type: ignore[import-not-found]
from typing import NoReturn
from test.support import os_helper, MS_WINDOWS, flush_std_streams
return runtests
def rerun_failed_tests(self, runtests: RunTests) -> None:
+ ansi = get_colors()
+ red, reset = ansi.BOLD_RED, ansi.RESET
+
if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
rerun_runtests = self._rerun_failed_tests(runtests)
if self.results.bad:
- print(count(len(self.results.bad), 'test'), "failed again:")
+ print(
+ f"{red}{count(len(self.results.bad), 'test')} "
+ f"failed again:{reset}"
+ )
printlist(self.results.bad)
self.display_result(rerun_runtests)
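The hunks above and below all follow one pattern, sketched here: ``get_colors()`` returns an object whose color attributes are ANSI escape sequences when colorization is enabled and (by assumption, matching how the patched code uses them) empty strings otherwise, so the same f-string works in both modes:

    from _colorize import get_colors

    ansi = get_colors()
    red, reset = ansi.BOLD_RED, ansi.RESET

    # Color on:  red == "\x1b[1;31m", reset == "\x1b[0m".
    # Color off: both are "", and the message prints as plain text.
    print(f"{red}2 tests failed again:{reset}")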
import dataclasses
import json
+from _colorize import get_colors # type: ignore[import-not-found]
from typing import Any
from .utils import (
return State.is_failed(self.state)
def _format_failed(self):
+ ansi = get_colors()
+ red, reset = ansi.BOLD_RED, ansi.RESET
if self.errors and self.failures:
le = len(self.errors)
lf = len(self.failures)
error_s = "error" + ("s" if le > 1 else "")
failure_s = "failure" + ("s" if lf > 1 else "")
- return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
+ return (
+ f"{red}{self.test_name} failed "
+ f"({le} {error_s}, {lf} {failure_s}){reset}"
+ )
if self.errors:
le = len(self.errors)
error_s = "error" + ("s" if le > 1 else "")
- return f"{self.test_name} failed ({le} {error_s})"
+ return f"{red}{self.test_name} failed ({le} {error_s}){reset}"
if self.failures:
lf = len(self.failures)
failure_s = "failure" + ("s" if lf > 1 else "")
- return f"{self.test_name} failed ({lf} {failure_s})"
+ return f"{red}{self.test_name} failed ({lf} {failure_s}){reset}"
- return f"{self.test_name} failed"
+ return f"{red}{self.test_name} failed{reset}"
def __str__(self) -> str:
+ ansi = get_colors()
+ green = ansi.GREEN
+ red = ansi.BOLD_RED
+ reset = ansi.RESET
+ yellow = ansi.YELLOW
+
match self.state:
case State.PASSED:
- return f"{self.test_name} passed"
+ return f"{green}{self.test_name} passed{reset}"
case State.FAILED:
return self._format_failed()
case State.SKIPPED:
- return f"{self.test_name} skipped"
+ return f"{yellow}{self.test_name} skipped{reset}"
case State.UNCAUGHT_EXC:
- return f"{self.test_name} failed (uncaught exception)"
+ return (
+ f"{red}{self.test_name} failed (uncaught exception){reset}"
+ )
case State.REFLEAK:
- return f"{self.test_name} failed (reference leak)"
+ return f"{red}{self.test_name} failed (reference leak){reset}"
case State.ENV_CHANGED:
- return f"{self.test_name} failed (env changed)"
+ return f"{red}{self.test_name} failed (env changed){reset}"
case State.RESOURCE_DENIED:
- return f"{self.test_name} skipped (resource denied)"
+ return f"{yellow}{self.test_name} skipped (resource denied){reset}"
case State.INTERRUPTED:
- return f"{self.test_name} interrupted"
+ return f"{yellow}{self.test_name} interrupted{reset}"
case State.WORKER_FAILED:
- return f"{self.test_name} worker non-zero exit code"
+ return (
+ f"{red}{self.test_name} worker non-zero exit code{reset}"
+ )
case State.WORKER_BUG:
- return f"{self.test_name} worker bug"
+ return f"{red}{self.test_name} worker bug{reset}"
case State.DID_NOT_RUN:
- return f"{self.test_name} ran no tests"
+ return f"{yellow}{self.test_name} ran no tests{reset}"
case State.TIMEOUT:
assert self.duration is not None, "self.duration is None"
return f"{self.test_name} timed out ({format_duration(self.duration)})"
case _:
- raise ValueError("unknown result state: {state!r}")
+ raise ValueError(
+ f"{red}unknown result state: {self.state!r}{reset}"
+ )
def has_meaningful_duration(self):
return State.has_meaningful_duration(self.state)
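A short usage sketch for the colorized ``__str__`` above (``test_os`` is just a placeholder name; ``TestResult`` is a dataclass, so a name plus a ``state`` is enough):

    from test.libregrtest.result import State, TestResult

    result = TestResult("test_os")
    result.state = State.PASSED
    print(result)  # "test_os passed", wrapped in green when color is on

    result.state = State.SKIPPED
    print(result)  # "test_os skipped", wrapped in yellow when color is on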
import sys
import trace
+from _colorize import get_colors # type: ignore[import-not-found]
from typing import TYPE_CHECKING
from .runtests import RunTests
def get_state(self, fail_env_changed: bool) -> str:
state = []
+ ansi = get_colors()
+ green = ansi.GREEN
+ red = ansi.BOLD_RED
+ reset = ansi.RESET
+ yellow = ansi.YELLOW
if self.bad:
- state.append("FAILURE")
+ state.append(f"{red}FAILURE{reset}")
elif fail_env_changed and self.env_changed:
- state.append("ENV CHANGED")
+ state.append(f"{yellow}ENV CHANGED{reset}")
elif self.no_tests_run():
- state.append("NO TESTS RAN")
+ state.append(f"{yellow}NO TESTS RAN{reset}")
if self.interrupted:
- state.append("INTERRUPTED")
+ state.append(f"{yellow}INTERRUPTED{reset}")
if self.worker_bug:
- state.append("WORKER BUG")
+ state.append(f"{red}WORKER BUG{reset}")
if not state:
- state.append("SUCCESS")
+ state.append(f"{green}SUCCESS{reset}")
return ', '.join(state)
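To illustrate how the colored tokens combine (this mirrors the ``TestColorized`` unit test added at the end of this patch; each flag contributes one comma-separated, individually colored word):

    from test.libregrtest.results import TestResults

    results = TestResults()
    results.bad = ["test_os"]  # placeholder failing test
    results.interrupted = True

    # Prints e.g. "FAILURE, INTERRUPTED":
    # FAILURE in bold red, INTERRUPTED in yellow (plain text if color is off).
    print(results.get_state(fail_env_changed=False))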
f.write(s)
def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
+ ansi = get_colors()
+ green = ansi.GREEN
+ red = ansi.BOLD_RED
+ reset = ansi.RESET
+ yellow = ansi.YELLOW
+
if print_slowest:
self.test_times.sort(reverse=True)
print()
- print("10 slowest tests:")
+ print(f"{yellow}10 slowest tests:{reset}")
for test_time, test in self.test_times[:10]:
- print("- %s: %s" % (test, format_duration(test_time)))
+ print(f"- {test}: {format_duration(test_time)}")
all_tests = []
omitted = set(tests) - self.get_executed()
# less important
- all_tests.append((sorted(omitted), "test", "{} omitted:"))
+ all_tests.append(
+ (sorted(omitted), "test", f"{yellow}{{}} omitted:{reset}")
+ )
if not quiet:
- all_tests.append((self.skipped, "test", "{} skipped:"))
- all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
- all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
+ all_tests.append(
+ (self.skipped, "test", f"{yellow}{{}} skipped:{reset}")
+ )
+ all_tests.append(
+ (
+ self.resource_denied,
+ "test",
+ f"{yellow}{{}} skipped (resource denied):{reset}",
+ )
+ )
+ all_tests.append(
+ (self.run_no_tests, "test", f"{yellow}{{}} run no tests:{reset}")
+ )
# more important
- all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
- all_tests.append((self.rerun, "re-run test", "{}:"))
- all_tests.append((self.bad, "test", "{} failed:"))
+ all_tests.append(
+ (
+ self.env_changed,
+ "test",
+ f"{yellow}{{}} altered the execution environment (env changed):{reset}",
+ )
+ )
+ all_tests.append((self.rerun, "re-run test", f"{yellow}{{}}:{reset}"))
+ all_tests.append((self.bad, "test", f"{red}{{}} failed:{reset}"))
for tests_list, count_text, title_format in all_tests:
if tests_list:
if self.good and not quiet:
print()
text = count(len(self.good), "test")
- text = f"{text} OK."
- if (self.is_all_good() and len(self.good) > 1):
+ text = f"{green}{text} OK.{reset}"
+ if self.is_all_good() and len(self.good) > 1:
text = f"All {text}"
print(text)
if self.interrupted:
print()
- print("Test suite interrupted by signal SIGINT.")
+ print(f"{yellow}Test suite interrupted by signal SIGINT.{reset}")
def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
# Total tests
+ ansi = get_colors()
+ red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW
+
stats = self.stats
text = f'run={stats.tests_run:,}'
if filtered:
text = f"{text} (filtered)"
report = [text]
if stats.failures:
- report.append(f'failures={stats.failures:,}')
+ report.append(f'{red}failures={stats.failures:,}{reset}')
if stats.skipped:
- report.append(f'skipped={stats.skipped:,}')
+ report.append(f'{yellow}skipped={stats.skipped:,}{reset}')
print(f"Total tests: {' '.join(report)}")
# Total test files
if filtered:
text = f"{text} (filtered)"
report = [text]
- for name, tests in (
- ('failed', self.bad),
- ('env_changed', self.env_changed),
- ('skipped', self.skipped),
- ('resource_denied', self.resource_denied),
- ('rerun', self.rerun),
- ('run_no_tests', self.run_no_tests),
+ for name, tests, color in (
+ ('failed', self.bad, red),
+ ('env_changed', self.env_changed, yellow),
+ ('skipped', self.skipped, yellow),
+ ('resource_denied', self.resource_denied, yellow),
+ ('rerun', self.rerun, yellow),
+ ('run_no_tests', self.run_no_tests, yellow),
):
if tests:
- report.append(f'{name}={len(tests)}')
+ report.append(f'{color}{name}={len(tests)}{reset}')
print(f"Total test files: {' '.join(report)}")
import traceback
import unittest
+from _colorize import get_colors # type: ignore[import-not-found]
from test import support
from test.support import threading_helper
def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
display_failure: bool = True) -> None:
# Handle exceptions, detect environment changes.
+ ansi = get_colors()
+ red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW
# Reset the environment_altered flag to detect if a test altered
# the environment
_load_run_test(result, runtests)
except support.ResourceDenied as exc:
if not quiet and not pgo:
- print(f"{test_name} skipped -- {exc}", flush=True)
+ print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
result.state = State.RESOURCE_DENIED
return
except unittest.SkipTest as exc:
if not quiet and not pgo:
- print(f"{test_name} skipped -- {exc}", flush=True)
+ print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
result.state = State.SKIPPED
return
except support.TestFailedWithDetails as exc:
- msg = f"test {test_name} failed"
+ msg = f"{red}test {test_name} failed{reset}"
if display_failure:
- msg = f"{msg} -- {exc}"
+ msg = f"{red}{msg} -- {exc}{reset}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.errors = exc.errors
result.stats = exc.stats
return
except support.TestFailed as exc:
- msg = f"test {test_name} failed"
+ msg = f"{red}test {test_name} failed{reset}"
if display_failure:
- msg = f"{msg} -- {exc}"
+ msg = f"{red}{msg} -- {exc}{reset}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.stats = exc.stats
except:
if not pgo:
msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
+ print(f"{red}test {test_name} crashed -- {msg}{reset}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
return
If runtests.use_junit, xml_data is a list containing each generated
testsuite element.
"""
+ ansi = get_colors()
+ red, reset, yellow = ansi.BOLD_RED, ansi.RESET, ansi.YELLOW
+
start_time = time.perf_counter()
result = TestResult(test_name)
pgo = runtests.pgo
except:
if not pgo:
msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
+ print(f"{red}test {test_name} crashed -- {msg}{reset}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
Note: test_regrtest cannot be run twice in parallel.
"""
+import _colorize
import contextlib
import dataclasses
import glob
import tempfile
import textwrap
import unittest
+import unittest.mock
from xml.etree import ElementTree
from test import support
'valid t\xe9xt \u20ac')
+from test.libregrtest.results import TestResults
+
+
+class TestColorized(unittest.TestCase):
+ def test_test_result_get_state(self):
+ # Arrange
+ green = _colorize.ANSIColors.GREEN
+ red = _colorize.ANSIColors.BOLD_RED
+ reset = _colorize.ANSIColors.RESET
+ yellow = _colorize.ANSIColors.YELLOW
+
+ good_results = TestResults()
+ good_results.good = ["good1", "good2"]
+ bad_results = TestResults()
+ bad_results.bad = ["bad1", "bad2"]
+ no_results = TestResults()
+ no_results.bad = []
+ interrupted_results = TestResults()
+ interrupted_results.interrupted = True
+ interrupted_worker_bug = TestResults()
+ interrupted_worker_bug.interrupted = True
+ interrupted_worker_bug.worker_bug = True
+
+ for results, expected in (
+ (good_results, f"{green}SUCCESS{reset}"),
+ (bad_results, f"{red}FAILURE{reset}"),
+ (no_results, f"{yellow}NO TESTS RAN{reset}"),
+ (interrupted_results, f"{yellow}INTERRUPTED{reset}"),
+ (
+ interrupted_worker_bug,
+ f"{yellow}INTERRUPTED{reset}, {red}WORKER BUG{reset}",
+ ),
+ ):
+ with self.subTest(results=results, expected=expected):
+ # Act
+ with unittest.mock.patch(
+ "_colorize.can_colorize", return_value=True
+ ):
+ result = results.get_state(fail_env_changed=False)
+
+ # Assert
+ self.assertEqual(result, expected)
+
+
if __name__ == '__main__':
unittest.main()
--- /dev/null
+Add colour to :mod:`test.regrtest` output. Patch by Hugo van Kemenade.