gh-127718: Add colour to `test.regrtest` output (#127719)
author Hugo van Kemenade <1324225+hugovk@users.noreply.github.com>
Tue, 10 Dec 2024 07:44:15 +0000 (09:44 +0200)
committer GitHub <noreply@github.com>
Tue, 10 Dec 2024 07:44:15 +0000 (09:44 +0200)
Doc/library/test.rst
Lib/test/libregrtest/main.py
Lib/test/libregrtest/result.py
Lib/test/libregrtest/results.py
Lib/test/libregrtest/single.py
Lib/test/test_regrtest.py
Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst [new file with mode: 0644]

diff --git a/Doc/library/test.rst b/Doc/library/test.rst
index 04d28aee0f86722c030bbec67979dd3c64a387b1..b5b6e442e218fda6d93d3f2ba38e26eccbbed018 100644
@@ -192,6 +192,10 @@ top-level directory where Python was built. On Windows,
 executing :program:`rt.bat` from your :file:`PCbuild` directory will run all
 regression tests.
 
+.. versionadded:: 3.14
+   Output is colorized by default and can be
+   :ref:`controlled using environment variables <using-on-controlling-color>`.
+
 
 :mod:`test.support` --- Utilities for the Python test suite
 ===========================================================
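
The versionadded note above points at the interpreter-wide colour controls. As a minimal sketch (not part of this commit, and assuming the PYTHON_COLORS, NO_COLOR and FORCE_COLOR environment variables documented under "Controlling color"), the same switches govern whether regrtest emits colour:

# Minimal sketch, not part of the commit: regrtest colours follow the
# interpreter-wide controls, so PYTHON_COLORS=0 (or NO_COLOR) disables them
# and FORCE_COLOR=1 forces them even when output is redirected.
import os
import _colorize

os.environ["PYTHON_COLORS"] = "0"
print(_colorize.can_colorize())   # False: get_colors() returns empty strings

del os.environ["PYTHON_COLORS"]
os.environ["FORCE_COLOR"] = "1"
print(_colorize.can_colorize())   # typically True: get_colors() returns real ANSI escapes
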
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 49209b0cec756ec7c021e4a650c53768f8dbf3d8..dcbcc6790c68d8a993bc0427d1c62ce296be4d15 100644
@@ -6,6 +6,7 @@ import sys
 import sysconfig
 import time
 import trace
+from _colorize import get_colors  # type: ignore[import-not-found]
 from typing import NoReturn
 
 from test.support import os_helper, MS_WINDOWS, flush_std_streams
@@ -270,6 +271,9 @@ class Regrtest:
         return runtests
 
     def rerun_failed_tests(self, runtests: RunTests) -> None:
+        ansi = get_colors()
+        red, reset = ansi.BOLD_RED, ansi.RESET
+
         if self.python_cmd:
             # Temp patch for https://github.com/python/cpython/issues/94052
             self.log(
@@ -284,7 +288,10 @@ class Regrtest:
         rerun_runtests = self._rerun_failed_tests(runtests)
 
         if self.results.bad:
-            print(count(len(self.results.bad), 'test'), "failed again:")
+            print(
+                f"{red}{count(len(self.results.bad), 'test')} "
+                f"failed again:{reset}"
+            )
             printlist(self.results.bad)
 
         self.display_result(rerun_runtests)
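
The same pattern recurs throughout the change. As a rough illustration (not part of the commit) of why the f-strings need no conditionals: _colorize.get_colors() returns an object whose colour attributes are ANSI escape sequences when colour is enabled and empty strings otherwise.

# Rough illustration, not part of the commit: when colour is disabled every
# attribute of get_colors() is "", so the message degrades to plain text.
from _colorize import get_colors

ansi = get_colors()
red, reset = ansi.BOLD_RED, ansi.RESET
print(f"{red}2 tests failed again:{reset}")
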
diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py
index 7553efe5e8abeb19c06ca39910186151f69955e8..daf7624366ee207382881103e8186b4e2ec6ea57 100644
@@ -1,5 +1,6 @@
 import dataclasses
 import json
+from _colorize import get_colors  # type: ignore[import-not-found]
 from typing import Any
 
 from .utils import (
@@ -105,54 +106,71 @@ class TestResult:
         return State.is_failed(self.state)
 
     def _format_failed(self):
+        ansi = get_colors()
+        red, reset = ansi.BOLD_RED, ansi.RESET
         if self.errors and self.failures:
             le = len(self.errors)
             lf = len(self.failures)
             error_s = "error" + ("s" if le > 1 else "")
             failure_s = "failure" + ("s" if lf > 1 else "")
-            return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
+            return (
+                f"{red}{self.test_name} failed "
+                f"({le} {error_s}, {lf} {failure_s}){reset}"
+            )
 
         if self.errors:
             le = len(self.errors)
             error_s = "error" + ("s" if le > 1 else "")
-            return f"{self.test_name} failed ({le} {error_s})"
+            return f"{red}{self.test_name} failed ({le} {error_s}){reset}"
 
         if self.failures:
             lf = len(self.failures)
             failure_s = "failure" + ("s" if lf > 1 else "")
-            return f"{self.test_name} failed ({lf} {failure_s})"
+            return f"{red}{self.test_name} failed ({lf} {failure_s}){reset}"
 
-        return f"{self.test_name} failed"
+        return f"{red}{self.test_name} failed{reset}"
 
     def __str__(self) -> str:
+        ansi = get_colors()
+        green = ansi.GREEN
+        red = ansi.BOLD_RED
+        reset = ansi.RESET
+        yellow = ansi.YELLOW
+
         match self.state:
             case State.PASSED:
-                return f"{self.test_name} passed"
+                return f"{green}{self.test_name} passed{reset}"
             case State.FAILED:
-                return self._format_failed()
+                return f"{red}{self._format_failed()}{reset}"
             case State.SKIPPED:
-                return f"{self.test_name} skipped"
+                return f"{yellow}{self.test_name} skipped{reset}"
             case State.UNCAUGHT_EXC:
-                return f"{self.test_name} failed (uncaught exception)"
+                return (
+                    f"{red}{self.test_name} failed (uncaught exception){reset}"
+                )
             case State.REFLEAK:
-                return f"{self.test_name} failed (reference leak)"
+                return f"{red}{self.test_name} failed (reference leak){reset}"
             case State.ENV_CHANGED:
-                return f"{self.test_name} failed (env changed)"
+                return f"{red}{self.test_name} failed (env changed){reset}"
             case State.RESOURCE_DENIED:
-                return f"{self.test_name} skipped (resource denied)"
+                return f"{yellow}{self.test_name} skipped (resource denied){reset}"
             case State.INTERRUPTED:
-                return f"{self.test_name} interrupted"
+                return f"{yellow}{self.test_name} interrupted{reset}"
             case State.WORKER_FAILED:
-                return f"{self.test_name} worker non-zero exit code"
+                return (
+                    f"{red}{self.test_name} worker non-zero exit code{reset}"
+                )
             case State.WORKER_BUG:
-                return f"{self.test_name} worker bug"
+                return f"{red}{self.test_name} worker bug{reset}"
             case State.DID_NOT_RUN:
-                return f"{self.test_name} ran no tests"
+                return f"{yellow}{self.test_name} ran no tests{reset}"
             case State.TIMEOUT:
                 assert self.duration is not None, "self.duration is None"
                 return f"{self.test_name} timed out ({format_duration(self.duration)})"
             case _:
-                raise ValueError("unknown result state: {state!r}")
+                raise ValueError(
+                    f"{red}unknown result state: {{state!r}}{reset}"
+                )
 
     def has_meaningful_duration(self):
         return State.has_meaningful_duration(self.state)
diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py
index 9eda926966dc7edd08ff0a08391c85ae15be2544..a35934fc2c9ca82afd6a873f3a3ef484bfa6102b 100644
@@ -1,5 +1,6 @@
 import sys
 import trace
+from _colorize import get_colors  # type: ignore[import-not-found]
 from typing import TYPE_CHECKING
 
 from .runtests import RunTests
@@ -59,19 +60,24 @@ class TestResults:
 
     def get_state(self, fail_env_changed: bool) -> str:
         state = []
+        ansi = get_colors()
+        green = ansi.GREEN
+        red = ansi.BOLD_RED
+        reset = ansi.RESET
+        yellow = ansi.YELLOW
         if self.bad:
-            state.append("FAILURE")
+            state.append(f"{red}FAILURE{reset}")
         elif fail_env_changed and self.env_changed:
-            state.append("ENV CHANGED")
+            state.append(f"{yellow}ENV CHANGED{reset}")
         elif self.no_tests_run():
-            state.append("NO TESTS RAN")
+            state.append(f"{yellow}NO TESTS RAN{reset}")
 
         if self.interrupted:
-            state.append("INTERRUPTED")
+            state.append(f"{yellow}INTERRUPTED{reset}")
         if self.worker_bug:
-            state.append("WORKER BUG")
+            state.append(f"{red}WORKER BUG{reset}")
         if not state:
-            state.append("SUCCESS")
+            state.append(f"{green}SUCCESS{reset}")
 
         return ', '.join(state)
 
@@ -197,27 +203,51 @@ class TestResults:
                 f.write(s)
 
     def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
+        ansi = get_colors()
+        green = ansi.GREEN
+        red = ansi.BOLD_RED
+        reset = ansi.RESET
+        yellow = ansi.YELLOW
+
         if print_slowest:
             self.test_times.sort(reverse=True)
             print()
-            print("10 slowest tests:")
+            print(f"{yellow}10 slowest tests:{reset}")
             for test_time, test in self.test_times[:10]:
-                print("- %s: %s" % (test, format_duration(test_time)))
+                print(f"- {test}: {format_duration(test_time)}")
 
         all_tests = []
         omitted = set(tests) - self.get_executed()
 
         # less important
-        all_tests.append((sorted(omitted), "test", "{} omitted:"))
+        all_tests.append(
+            (sorted(omitted), "test", f"{yellow}{{}} omitted:{reset}")
+        )
         if not quiet:
-            all_tests.append((self.skipped, "test", "{} skipped:"))
-            all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
-        all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
+            all_tests.append(
+                (self.skipped, "test", f"{yellow}{{}} skipped:{reset}")
+            )
+            all_tests.append(
+                (
+                    self.resource_denied,
+                    "test",
+                    f"{yellow}{{}} skipped (resource denied):{reset}",
+                )
+            )
+        all_tests.append(
+            (self.run_no_tests, "test", f"{yellow}{{}} run no tests:{reset}")
+        )
 
         # more important
-        all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
-        all_tests.append((self.rerun, "re-run test", "{}:"))
-        all_tests.append((self.bad, "test", "{} failed:"))
+        all_tests.append(
+            (
+                self.env_changed,
+                "test",
+                f"{yellow}{{}} altered the execution environment (env changed):{reset}",
+            )
+        )
+        all_tests.append((self.rerun, "re-run test", f"{yellow}{{}}:{reset}"))
+        all_tests.append((self.bad, "test", f"{red}{{}} failed:{reset}"))
 
         for tests_list, count_text, title_format in all_tests:
             if tests_list:
@@ -229,26 +259,29 @@ class TestResults:
         if self.good and not quiet:
             print()
             text = count(len(self.good), "test")
-            text = f"{text} OK."
-            if (self.is_all_good() and len(self.good) > 1):
+            text = f"{green}{text} OK.{reset}"
+            if self.is_all_good() and len(self.good) > 1:
                 text = f"All {text}"
             print(text)
 
         if self.interrupted:
             print()
-            print("Test suite interrupted by signal SIGINT.")
+            print(f"{yellow}Test suite interrupted by signal SIGINT.{reset}")
 
     def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
         # Total tests
+        ansi = get_colors()
+        red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW
+
         stats = self.stats
         text = f'run={stats.tests_run:,}'
         if filtered:
             text = f"{text} (filtered)"
         report = [text]
         if stats.failures:
-            report.append(f'failures={stats.failures:,}')
+            report.append(f'{red}failures={stats.failures:,}{reset}')
         if stats.skipped:
-            report.append(f'skipped={stats.skipped:,}')
+            report.append(f'{yellow}skipped={stats.skipped:,}{reset}')
         print(f"Total tests: {' '.join(report)}")
 
         # Total test files
@@ -263,14 +296,14 @@ class TestResults:
         if filtered:
             text = f"{text} (filtered)"
         report = [text]
-        for name, tests in (
-            ('failed', self.bad),
-            ('env_changed', self.env_changed),
-            ('skipped', self.skipped),
-            ('resource_denied', self.resource_denied),
-            ('rerun', self.rerun),
-            ('run_no_tests', self.run_no_tests),
+        for name, tests, color in (
+            ('failed', self.bad, red),
+            ('env_changed', self.env_changed, yellow),
+            ('skipped', self.skipped, yellow),
+            ('resource_denied', self.resource_denied, yellow),
+            ('rerun', self.rerun, yellow),
+            ('run_no_tests', self.run_no_tests, yellow),
         ):
             if tests:
-                report.append(f'{name}={len(tests)}')
+                report.append(f'{color}{name}={len(tests)}{reset}')
         print(f"Total test files: {' '.join(report)}")
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
index 17323e7f9cf730db9b851d13fd74e8a7b5d06bb1..0e174f82abed2868a2707cbcb6e4f88233a18fff 100644
@@ -7,6 +7,7 @@ import time
 import traceback
 import unittest
 
+from _colorize import get_colors  # type: ignore[import-not-found]
 from test import support
 from test.support import threading_helper
 
@@ -161,6 +162,8 @@ def _load_run_test(result: TestResult, runtests: RunTests) -> None:
 def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
                              display_failure: bool = True) -> None:
     # Handle exceptions, detect environment changes.
+    ansi = get_colors()
+    red, reset, yellow = ansi.RED, ansi.RESET, ansi.YELLOW
 
     # Reset the environment_altered flag to detect if a test altered
     # the environment
@@ -181,18 +184,18 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
             _load_run_test(result, runtests)
     except support.ResourceDenied as exc:
         if not quiet and not pgo:
-            print(f"{test_name} skipped -- {exc}", flush=True)
+            print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
         result.state = State.RESOURCE_DENIED
         return
     except unittest.SkipTest as exc:
         if not quiet and not pgo:
-            print(f"{test_name} skipped -- {exc}", flush=True)
+            print(f"{yellow}{test_name} skipped -- {exc}{reset}", flush=True)
         result.state = State.SKIPPED
         return
     except support.TestFailedWithDetails as exc:
-        msg = f"test {test_name} failed"
+        msg = f"{red}test {test_name} failed{reset}"
         if display_failure:
-            msg = f"{msg} -- {exc}"
+            msg = f"{red}{msg} -- {exc}{reset}"
         print(msg, file=sys.stderr, flush=True)
         result.state = State.FAILED
         result.errors = exc.errors
@@ -200,9 +203,9 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
         result.stats = exc.stats
         return
     except support.TestFailed as exc:
-        msg = f"test {test_name} failed"
+        msg = f"{red}test {test_name} failed{reset}"
         if display_failure:
-            msg = f"{msg} -- {exc}"
+            msg = f"{red}{msg} -- {exc}{reset}"
         print(msg, file=sys.stderr, flush=True)
         result.state = State.FAILED
         result.stats = exc.stats
@@ -217,7 +220,7 @@ def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
     except:
         if not pgo:
             msg = traceback.format_exc()
-            print(f"test {test_name} crashed -- {msg}",
+            print(f"{red}test {test_name} crashed -- {msg}{reset}",
                   file=sys.stderr, flush=True)
         result.state = State.UNCAUGHT_EXC
         return
@@ -300,6 +303,9 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
     If runtests.use_junit, xml_data is a list containing each generated
     testsuite element.
     """
+    ansi = get_colors()
+    red, reset, yellow = ansi.BOLD_RED, ansi.RESET, ansi.YELLOW
+
     start_time = time.perf_counter()
     result = TestResult(test_name)
     pgo = runtests.pgo
@@ -308,7 +314,7 @@ def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
     except:
         if not pgo:
             msg = traceback.format_exc()
-            print(f"test {test_name} crashed -- {msg}",
+            print(f"{red}test {test_name} crashed -- {msg}{reset}",
                   file=sys.stderr, flush=True)
         result.state = State.UNCAUGHT_EXC
 
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 0ab7a23aca1df87c011417661b1c46a3e58c692e..ab46ccbf004a3ac4e15f219e2dd8018459ee6215 100644
@@ -4,6 +4,7 @@ Tests of regrtest.py.
 Note: test_regrtest cannot be run twice in parallel.
 """
 
+import _colorize
 import contextlib
 import dataclasses
 import glob
@@ -21,6 +22,7 @@ import sysconfig
 import tempfile
 import textwrap
 import unittest
+import unittest.mock
 from xml.etree import ElementTree
 
 from test import support
@@ -2487,5 +2489,49 @@ class TestUtils(unittest.TestCase):
                          'valid t\xe9xt \u20ac')
 
 
+from test.libregrtest.results import TestResults
+
+
+class TestColorized(unittest.TestCase):
+    def test_test_result_get_state(self):
+        # Arrange
+        green = _colorize.ANSIColors.GREEN
+        red = _colorize.ANSIColors.BOLD_RED
+        reset = _colorize.ANSIColors.RESET
+        yellow = _colorize.ANSIColors.YELLOW
+
+        good_results = TestResults()
+        good_results.good = ["good1", "good2"]
+        bad_results = TestResults()
+        bad_results.bad = ["bad1", "bad2"]
+        no_results = TestResults()
+        no_results.bad = []
+        interrupted_results = TestResults()
+        interrupted_results.interrupted = True
+        interrupted_worker_bug = TestResults()
+        interrupted_worker_bug.interrupted = True
+        interrupted_worker_bug.worker_bug = True
+
+        for results, expected in (
+            (good_results, f"{green}SUCCESS{reset}"),
+            (bad_results, f"{red}FAILURE{reset}"),
+            (no_results, f"{yellow}NO TESTS RAN{reset}"),
+            (interrupted_results, f"{yellow}INTERRUPTED{reset}"),
+            (
+                interrupted_worker_bug,
+                f"{yellow}INTERRUPTED{reset}, {red}WORKER BUG{reset}",
+            ),
+        ):
+            with self.subTest(results=results, expected=expected):
+                # Act
+                with unittest.mock.patch(
+                    "_colorize.can_colorize", return_value=True
+                ):
+                    result = results.get_state(fail_env_changed=False)
+
+                # Assert
+                self.assertEqual(result, expected)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst b/Misc/NEWS.d/next/Library/2024-12-07-15-28-31.gh-issue-127718.9dpLfi.rst
new file mode 100644
index 0000000..6c1b7be
--- /dev/null
@@ -0,0 +1 @@
+Add colour to :mod:`test.regrtest` output. Patch by Hugo van Kemenade.