gh-127221: Add colour to unittest output (#127223)
author Hugo van Kemenade <1324225+hugovk@users.noreply.github.com>
Thu, 5 Dec 2024 19:10:46 +0000 (21:10 +0200)
committer GitHub <noreply@github.com>
Thu, 5 Dec 2024 19:10:46 +0000 (21:10 +0200)
Co-authored-by: Kirill Podoprigora <kirill.bast9@mail.ru>
15 files changed:
Doc/conf.py
Doc/library/doctest.rst
Doc/library/traceback.rst
Doc/library/unittest.rst
Doc/using/cmdline.rst
Doc/whatsnew/3.13.rst
Doc/whatsnew/3.14.rst
Lib/test/test_unittest/test_async_case.py
Lib/test/test_unittest/test_program.py
Lib/test/test_unittest/test_result.py
Lib/test/test_unittest/test_runner.py
Lib/test/test_unittest/test_skipping.py
Lib/unittest/result.py
Lib/unittest/runner.py
Misc/NEWS.d/next/Library/2024-11-23-00-17-29.gh-issue-127221.OSXdFE.rst [new file with mode: 0644]

diff --git a/Doc/conf.py b/Doc/conf.py
index 738c9901eef06fd1d0e2343a2eec537e2b4f5f3f..9cde394cbaed69c8826d61bd95b0e17d80f8bca2 100644
@@ -78,6 +78,13 @@ rst_epilog = f"""
 .. |python_version_literal| replace:: ``Python {version}``
 .. |python_x_dot_y_literal| replace:: ``python{version}``
 .. |usr_local_bin_python_x_dot_y_literal| replace:: ``/usr/local/bin/python{version}``
+
+.. Apparently this is how you hack together a formatted link:
+   (https://www.docutils.org/docs/ref/rst/directives.html#replacement-text)
+.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
+.. _FORCE_COLOR: https://force-color.org/
+.. |NO_COLOR| replace:: ``NO_COLOR``
+.. _NO_COLOR: https://no-color.org/
 """
 
 # There are two options for replacing |today|. Either, you set today to some
diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst
index 6b0282eed495663b0778941b4537da041b9a9989..106b0a6c95b7be6077c7b97bef3b32c7537e16d6 100644
@@ -136,6 +136,10 @@ examples of doctests in the standard Python test suite and libraries.
 Especially useful examples can be found in the standard test file
 :file:`Lib/test/test_doctest/test_doctest.py`.
 
+.. versionadded:: 3.13
+   Output is colorized by default and can be
+   :ref:`controlled using environment variables <using-on-controlling-color>`.
+
 
 .. _doctest-simple-testmod:
 
diff --git a/Doc/library/traceback.rst b/Doc/library/traceback.rst
index 8f94fc448f2482aea92fc1b11ed09f3f44351bfd..4899ed64ebad8d5c15988dc0826c9d39dc4d64dd 100644
@@ -44,6 +44,10 @@ The module's API can be divided into two parts:
   necessary for later formatting without holding references to actual exception
   and traceback objects.
 
+.. versionadded:: 3.13
+   Output is colorized by default and can be
+   :ref:`controlled using environment variables <using-on-controlling-color>`.
+
 
 Module-Level Functions
 ----------------------
diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst
index 38bad9405597dde088a427646dada58e44a79fb4..7f8b710f611002fff9b2f0738e172701a4f9a771 100644
@@ -46,7 +46,6 @@ test runner
    a textual interface, or return a special value to indicate the results of
    executing the tests.
 
-
 .. seealso::
 
    Module :mod:`doctest`
@@ -198,6 +197,9 @@ For a list of all the command-line options::
    In earlier versions it was only possible to run individual test methods and
    not modules or classes.
 
+.. versionadded:: 3.14
+   Output is colorized by default and can be
+   :ref:`controlled using environment variables <using-on-controlling-color>`.
 
 Command-line options
 ~~~~~~~~~~~~~~~~~~~~
diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst
index 6cf42b2771802214c87dc5827963adba0f438456..7db2f4820f346a21293dba72ddbe84c13a166a53 100644
@@ -663,14 +663,6 @@ output. To control the color output only in the Python interpreter, the
 precedence over ``NO_COLOR``, which in turn takes precedence over
 ``FORCE_COLOR``.
 
-.. Apparently this how you hack together a formatted link:
-
-.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
-.. _FORCE_COLOR: https://force-color.org/
-
-.. |NO_COLOR| replace:: ``NO_COLOR``
-.. _NO_COLOR: https://no-color.org/
-
 Options you shouldn't use
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst
index 664b186617237895112f748f23b51c9384e41146..9f6d98b9950d196e0db71887ad708480938c1aba 100644
@@ -252,15 +252,6 @@ Improved error messages
   the canonical |NO_COLOR|_ and |FORCE_COLOR|_ environment variables.
   (Contributed by Pablo Galindo Salgado in :gh:`112730`.)
 
-.. Apparently this how you hack together a formatted link:
-   (https://www.docutils.org/docs/ref/rst/directives.html#replacement-text)
-
-.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
-.. _FORCE_COLOR: https://force-color.org/
-
-.. |NO_COLOR| replace:: ``NO_COLOR``
-.. _NO_COLOR: https://no-color.org/
-
 * A common mistake is to write a script with the same name as a
   standard library module. When this results in errors, we now
   display a more helpful error message:
diff --git a/Doc/whatsnew/3.14.rst b/Doc/whatsnew/3.14.rst
index e83c509a025ab544cf9121f3da071b3217c677b2..db25c037e509b67d394fcd77050305fefbe8e1e5 100644
@@ -616,6 +616,13 @@ unicodedata
 unittest
 --------
 
+* :mod:`unittest` output is now colored by default.
+  This can be controlled via the :envvar:`PYTHON_COLORS` environment
+  variable as well as the canonical |NO_COLOR|_
+  and |FORCE_COLOR|_ environment variables.
+  See also :ref:`using-on-controlling-color`.
+  (Contributed by Hugo van Kemenade in :gh:`127221`.)
+
 * unittest discovery supports :term:`namespace package` as start
   directory again. It was removed in Python 3.11.
   (Contributed by Jacob Walls in :gh:`80958`.)
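
The whatsnew entry added above notes that unittest's colour output follows the PYTHON_COLORS, NO_COLOR and FORCE_COLOR environment variables. As a rough illustration of that behaviour (not part of the commit; the child-process invocation is only one assumed way to exercise it), colour can be switched off for a single run like so:

import os
import subprocess
import sys

# Run the test suite in a child process with colour explicitly disabled.
# Any non-empty NO_COLOR disables colour; FORCE_COLOR and PYTHON_COLORS
# act as the opposing/overriding switches described in the docs.
env = dict(os.environ, NO_COLOR="1")
subprocess.run([sys.executable, "-m", "unittest", "discover", "-v"], env=env)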
diff --git a/Lib/test/test_unittest/test_async_case.py b/Lib/test/test_unittest/test_async_case.py
index 00ef55bdf9bc832d86628c8359fa6da372c855f4..8ea244bff05c5f8bb2a5c34d2de842ccd0752269 100644
@@ -2,6 +2,7 @@ import asyncio
 import contextvars
 import unittest
 from test import support
+from test.support import force_not_colorized
 
 support.requires_working_socket(module=True)
 
@@ -252,6 +253,7 @@ class TestAsyncCase(unittest.TestCase):
         test.doCleanups()
         self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])
 
+    @force_not_colorized
     def test_exception_in_tear_clean_up(self):
         class Test(unittest.IsolatedAsyncioTestCase):
             async def asyncSetUp(self):
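
The force_not_colorized decorator applied to the tests in this commit comes from test.support; it runs the wrapped test with colour forced off so that string comparisons against plain, uncoloured output keep passing. A minimal sketch of the idea (illustrative only, not the actual test.support implementation) could look like this:

import functools
import os
from unittest import mock

def force_not_colorized_sketch(func):
    # Hypothetical stand-in: run *func* with colour-related variables cleared.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        env = {k: v for k, v in os.environ.items()
               if k not in ("FORCE_COLOR", "NO_COLOR", "PYTHON_COLORS")}
        env["NO_COLOR"] = "1"  # ask colour-aware code to emit plain text
        with mock.patch.dict(os.environ, env, clear=True):
            return func(*args, **kwargs)
    return wrapper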
diff --git a/Lib/test/test_unittest/test_program.py b/Lib/test/test_unittest/test_program.py
index 7241cf59f73d4f425e89ba9e68950e5f0e973ad1..0b46f338ac77e10851195a565bc3e7d65896afdf 100644
@@ -4,6 +4,7 @@ import subprocess
 from test import support
 import unittest
 import test.test_unittest
+from test.support import force_not_colorized
 from test.test_unittest.test_result import BufferedWriter
 
 
@@ -120,6 +121,7 @@ class Test_TestProgram(unittest.TestCase):
         self.assertEqual(['test.test_unittest', 'test.test_unittest2'],
                           program.testNames)
 
+    @force_not_colorized
     def test_NonExit(self):
         stream = BufferedWriter()
         program = unittest.main(exit=False,
@@ -135,6 +137,7 @@ class Test_TestProgram(unittest.TestCase):
                     'expected failures=1, unexpected successes=1)\n')
         self.assertTrue(out.endswith(expected))
 
+    @force_not_colorized
     def test_Exit(self):
         stream = BufferedWriter()
         with self.assertRaises(SystemExit) as cm:
@@ -152,6 +155,7 @@ class Test_TestProgram(unittest.TestCase):
                     'expected failures=1, unexpected successes=1)\n')
         self.assertTrue(out.endswith(expected))
 
+    @force_not_colorized
     def test_ExitAsDefault(self):
         stream = BufferedWriter()
         with self.assertRaises(SystemExit):
@@ -167,6 +171,7 @@ class Test_TestProgram(unittest.TestCase):
                     'expected failures=1, unexpected successes=1)\n')
         self.assertTrue(out.endswith(expected))
 
+    @force_not_colorized
     def test_ExitSkippedSuite(self):
         stream = BufferedWriter()
         with self.assertRaises(SystemExit) as cm:
@@ -179,6 +184,7 @@ class Test_TestProgram(unittest.TestCase):
         expected = '\n\nOK (skipped=1)\n'
         self.assertTrue(out.endswith(expected))
 
+    @force_not_colorized
     def test_ExitEmptySuite(self):
         stream = BufferedWriter()
         with self.assertRaises(SystemExit) as cm:
diff --git a/Lib/test/test_unittest/test_result.py b/Lib/test/test_unittest/test_result.py
index 4e5ec54e9c892a501c1d57466e25943579daef53..746b9fa2677717fe2f2802cc33df6f968e2cffaf 100644
@@ -7,6 +7,7 @@ from test.support import warnings_helper, captured_stdout
 import traceback
 import unittest
 from unittest.util import strclass
+from test.support import force_not_colorized
 from test.test_unittest.support import BufferedWriter
 
 
@@ -14,7 +15,7 @@ class MockTraceback(object):
     class TracebackException:
         def __init__(self, *args, **kwargs):
             self.capture_locals = kwargs.get('capture_locals', False)
-        def format(self):
+        def format(self, **kwargs):
             result = ['A traceback']
             if self.capture_locals:
                 result.append('locals')
@@ -205,6 +206,7 @@ class Test_TestResult(unittest.TestCase):
         self.assertIs(test_case, test)
         self.assertIsInstance(formatted_exc, str)
 
+    @force_not_colorized
     def test_addFailure_filter_traceback_frames(self):
         class Foo(unittest.TestCase):
             def test_1(self):
@@ -231,6 +233,7 @@ class Test_TestResult(unittest.TestCase):
         self.assertEqual(len(dropped), 1)
         self.assertIn("raise self.failureException(msg)", dropped[0])
 
+    @force_not_colorized
     def test_addFailure_filter_traceback_frames_context(self):
         class Foo(unittest.TestCase):
             def test_1(self):
@@ -260,6 +263,7 @@ class Test_TestResult(unittest.TestCase):
         self.assertEqual(len(dropped), 1)
         self.assertIn("raise self.failureException(msg)", dropped[0])
 
+    @force_not_colorized
     def test_addFailure_filter_traceback_frames_chained_exception_self_loop(self):
         class Foo(unittest.TestCase):
             def test_1(self):
@@ -285,6 +289,7 @@ class Test_TestResult(unittest.TestCase):
         formatted_exc = result.failures[0][1]
         self.assertEqual(formatted_exc.count("Exception: Loop\n"), 1)
 
+    @force_not_colorized
     def test_addFailure_filter_traceback_frames_chained_exception_cycle(self):
         class Foo(unittest.TestCase):
             def test_1(self):
@@ -446,6 +451,7 @@ class Test_TestResult(unittest.TestCase):
         result.addUnexpectedSuccess(None)
         self.assertTrue(result.shouldStop)
 
+    @force_not_colorized
     def testFailFastSetByRunner(self):
         stream = BufferedWriter()
         runner = unittest.TextTestRunner(stream=stream, failfast=True)
@@ -619,6 +625,7 @@ class Test_TextTestResult(unittest.TestCase):
         test.run(result)
         return stream.getvalue()
 
+    @force_not_colorized
     def testDotsOutput(self):
         self.assertEqual(self._run_test('testSuccess', 1), '.')
         self.assertEqual(self._run_test('testSkip', 1), 's')
@@ -627,6 +634,7 @@ class Test_TextTestResult(unittest.TestCase):
         self.assertEqual(self._run_test('testExpectedFailure', 1), 'x')
         self.assertEqual(self._run_test('testUnexpectedSuccess', 1), 'u')
 
+    @force_not_colorized
     def testLongOutput(self):
         classname = f'{__name__}.{self.Test.__qualname__}'
         self.assertEqual(self._run_test('testSuccess', 2),
@@ -642,17 +650,21 @@ class Test_TextTestResult(unittest.TestCase):
         self.assertEqual(self._run_test('testUnexpectedSuccess', 2),
                          f'testUnexpectedSuccess ({classname}.testUnexpectedSuccess) ... unexpected success\n')
 
+    @force_not_colorized
     def testDotsOutputSubTestSuccess(self):
         self.assertEqual(self._run_test('testSubTestSuccess', 1), '.')
 
+    @force_not_colorized
     def testLongOutputSubTestSuccess(self):
         classname = f'{__name__}.{self.Test.__qualname__}'
         self.assertEqual(self._run_test('testSubTestSuccess', 2),
                          f'testSubTestSuccess ({classname}.testSubTestSuccess) ... ok\n')
 
+    @force_not_colorized
     def testDotsOutputSubTestMixed(self):
         self.assertEqual(self._run_test('testSubTestMixed', 1), 'sFE')
 
+    @force_not_colorized
     def testLongOutputSubTestMixed(self):
         classname = f'{__name__}.{self.Test.__qualname__}'
         self.assertEqual(self._run_test('testSubTestMixed', 2),
@@ -661,6 +673,7 @@ class Test_TextTestResult(unittest.TestCase):
                 f'  testSubTestMixed ({classname}.testSubTestMixed) [fail] (c=3) ... FAIL\n'
                 f'  testSubTestMixed ({classname}.testSubTestMixed) [error] (d=4) ... ERROR\n')
 
+    @force_not_colorized
     def testDotsOutputTearDownFail(self):
         out = self._run_test('testSuccess', 1, AssertionError('fail'))
         self.assertEqual(out, 'F')
@@ -671,6 +684,7 @@ class Test_TextTestResult(unittest.TestCase):
         out = self._run_test('testSkip', 1, AssertionError('fail'))
         self.assertEqual(out, 'sF')
 
+    @force_not_colorized
     def testLongOutputTearDownFail(self):
         classname = f'{__name__}.{self.Test.__qualname__}'
         out = self._run_test('testSuccess', 2, AssertionError('fail'))
diff --git a/Lib/test/test_unittest/test_runner.py b/Lib/test/test_unittest/test_runner.py
index 1b9cef43e3f9c5f7dfcd34f0e23ab5f65b82af7e..1131cd73128866f8326dd4a556aa32ebed474352 100644
@@ -4,6 +4,7 @@ import sys
 import pickle
 import subprocess
 from test import support
+from test.support import force_not_colorized
 
 import unittest
 from unittest.case import _Outcome
@@ -106,6 +107,7 @@ class TestCleanUp(unittest.TestCase):
         self.assertTrue(test.doCleanups())
         self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])
 
+    @force_not_colorized
     def testCleanUpWithErrors(self):
         class TestableTest(unittest.TestCase):
             def testNothing(self):
@@ -416,6 +418,7 @@ class TestClassCleanup(unittest.TestCase):
         self.assertIsInstance(e2[1], CustomError)
         self.assertEqual(str(e2[1]), 'cleanup1')
 
+    @force_not_colorized
     def test_with_errors_addCleanUp(self):
         ordering = []
         class TestableTest(unittest.TestCase):
@@ -439,6 +442,7 @@ class TestClassCleanup(unittest.TestCase):
                          ['setUpClass', 'setUp', 'cleanup_exc',
                           'tearDownClass', 'cleanup_good'])
 
+    @force_not_colorized
     def test_run_with_errors_addClassCleanUp(self):
         ordering = []
         class TestableTest(unittest.TestCase):
@@ -462,6 +466,7 @@ class TestClassCleanup(unittest.TestCase):
                          ['setUpClass', 'setUp', 'test', 'cleanup_good',
                           'tearDownClass', 'cleanup_exc'])
 
+    @force_not_colorized
     def test_with_errors_in_addClassCleanup_and_setUps(self):
         ordering = []
         class_blow_up = False
@@ -514,6 +519,7 @@ class TestClassCleanup(unittest.TestCase):
                          ['setUpClass', 'setUp', 'tearDownClass',
                           'cleanup_exc'])
 
+    @force_not_colorized
     def test_with_errors_in_tearDownClass(self):
         ordering = []
         class TestableTest(unittest.TestCase):
@@ -590,6 +596,7 @@ class TestClassCleanup(unittest.TestCase):
                 'inner setup', 'inner test', 'inner cleanup',
                 'end outer test', 'outer cleanup'])
 
+    @force_not_colorized
     def test_run_empty_suite_error_message(self):
         class EmptyTest(unittest.TestCase):
             pass
@@ -663,6 +670,7 @@ class TestModuleCleanUp(unittest.TestCase):
         self.assertEqual(cleanups,
                          [((1, 2), {'function': 'hello'})])
 
+    @force_not_colorized
     def test_run_module_cleanUp(self):
         blowUp = True
         ordering = []
@@ -802,6 +810,7 @@ class TestModuleCleanUp(unittest.TestCase):
                                     'tearDownClass', 'cleanup_good'])
         self.assertEqual(unittest.case._module_cleanups, [])
 
+    @force_not_colorized
     def test_run_module_cleanUp_when_teardown_exception(self):
         ordering = []
         class Module(object):
@@ -963,6 +972,7 @@ class TestModuleCleanUp(unittest.TestCase):
         self.assertEqual(cleanups,
                          [((1, 2), {'function': 3, 'self': 4})])
 
+    @force_not_colorized
     def test_with_errors_in_addClassCleanup(self):
         ordering = []
 
@@ -996,6 +1006,7 @@ class TestModuleCleanUp(unittest.TestCase):
                          ['setUpModule', 'setUpClass', 'test', 'tearDownClass',
                           'cleanup_exc', 'tearDownModule', 'cleanup_good'])
 
+    @force_not_colorized
     def test_with_errors_in_addCleanup(self):
         ordering = []
         class Module(object):
@@ -1026,6 +1037,7 @@ class TestModuleCleanUp(unittest.TestCase):
                          ['setUpModule', 'setUp', 'test', 'tearDown',
                           'cleanup_exc', 'tearDownModule', 'cleanup_good'])
 
+    @force_not_colorized
     def test_with_errors_in_addModuleCleanup_and_setUps(self):
         ordering = []
         module_blow_up = False
@@ -1318,6 +1330,7 @@ class Test_TextTestRunner(unittest.TestCase):
         expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
         self.assertEqual(runner._makeResult(), expectedresult)
 
+    @force_not_colorized
     @support.requires_subprocess()
     def test_warnings(self):
         """
diff --git a/Lib/test/test_unittest/test_skipping.py b/Lib/test/test_unittest/test_skipping.py
index f146dcac18ecc092a51a8cb4c97e36d7b87bf90e..f5cb860c60b156f284195402534962e2037aff74 100644
@@ -1,5 +1,6 @@
 import unittest
 
+from test.support import force_not_colorized
 from test.test_unittest.support import LoggingResult
 
 
@@ -293,6 +294,7 @@ class Test_TestSkipping(unittest.TestCase):
         self.assertFalse(result.unexpectedSuccesses)
         self.assertTrue(result.wasSuccessful())
 
+    @force_not_colorized
     def test_expected_failure_and_fail_in_cleanup(self):
         class Foo(unittest.TestCase):
             @unittest.expectedFailure
@@ -372,6 +374,7 @@ class Test_TestSkipping(unittest.TestCase):
         self.assertEqual(result.unexpectedSuccesses, [test])
         self.assertFalse(result.wasSuccessful())
 
+    @force_not_colorized
     def test_unexpected_success_and_fail_in_cleanup(self):
         class Foo(unittest.TestCase):
             @unittest.expectedFailure
diff --git a/Lib/unittest/result.py b/Lib/unittest/result.py
index 3ace0a5b7bf2efb209630c5a7f9f6b7aafc1ef7d..97262735aa831177fde1610030e9f648b1b212af 100644
@@ -189,7 +189,9 @@ class TestResult(object):
         tb_e = traceback.TracebackException(
             exctype, value, tb,
             capture_locals=self.tb_locals, compact=True)
-        msgLines = list(tb_e.format())
+        from _colorize import can_colorize
+
+        msgLines = list(tb_e.format(colorize=can_colorize()))
 
         if self.buffer:
             output = sys.stdout.getvalue()
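
The change above makes TestResult format tracebacks with the colorize keyword that traceback.TracebackException.format() accepts in recent CPython, gated on _colorize.can_colorize(). A small standalone example of the same pattern (assuming the private _colorize helper is importable, as it is in current CPython sources):

import traceback
from _colorize import can_colorize  # private CPython helper; assumed available

try:
    1 / 0
except ZeroDivisionError as exc:
    tb_e = traceback.TracebackException.from_exception(exc)
    # Emit ANSI-coloured frames only when the output stream supports colour.
    print("".join(tb_e.format(colorize=can_colorize())))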
diff --git a/Lib/unittest/runner.py b/Lib/unittest/runner.py
index 2bcadf0c998bd9b04bb6f2cb19a2fa12a5f35c7e..d60c295a1eddf7a760590d104ed00326b4eb64bb 100644
@@ -4,6 +4,8 @@ import sys
 import time
 import warnings
 
+from _colorize import get_colors
+
 from . import result
 from .case import _SubTest
 from .signals import registerResult
@@ -13,18 +15,18 @@ __unittest = True
 
 class _WritelnDecorator(object):
     """Used to decorate file-like objects with a handy 'writeln' method"""
-    def __init__(self,stream):
+    def __init__(self, stream):
         self.stream = stream
 
     def __getattr__(self, attr):
         if attr in ('stream', '__getstate__'):
             raise AttributeError(attr)
-        return getattr(self.stream,attr)
+        return getattr(self.stream, attr)
 
     def writeln(self, arg=None):
         if arg:
             self.write(arg)
-        self.write('\n') # text-mode streams translate to \r\n if needed
+        self.write('\n')  # text-mode streams translate to \r\n if needed
 
 
 class TextTestResult(result.TestResult):
@@ -43,6 +45,7 @@ class TextTestResult(result.TestResult):
         self.showAll = verbosity > 1
         self.dots = verbosity == 1
         self.descriptions = descriptions
+        self._ansi = get_colors()
         self._newline = True
         self.durations = durations
 
@@ -76,86 +79,102 @@ class TextTestResult(result.TestResult):
 
     def addSubTest(self, test, subtest, err):
         if err is not None:
+            red, reset = self._ansi.RED, self._ansi.RESET
             if self.showAll:
                 if issubclass(err[0], subtest.failureException):
-                    self._write_status(subtest, "FAIL")
+                    self._write_status(subtest, f"{red}FAIL{reset}")
                 else:
-                    self._write_status(subtest, "ERROR")
+                    self._write_status(subtest, f"{red}ERROR{reset}")
             elif self.dots:
                 if issubclass(err[0], subtest.failureException):
-                    self.stream.write('F')
+                    self.stream.write(f"{red}F{reset}")
                 else:
-                    self.stream.write('E')
+                    self.stream.write(f"{red}E{reset}")
                 self.stream.flush()
         super(TextTestResult, self).addSubTest(test, subtest, err)
 
     def addSuccess(self, test):
         super(TextTestResult, self).addSuccess(test)
+        green, reset = self._ansi.GREEN, self._ansi.RESET
         if self.showAll:
-            self._write_status(test, "ok")
+            self._write_status(test, f"{green}ok{reset}")
         elif self.dots:
-            self.stream.write('.')
+            self.stream.write(f"{green}.{reset}")
             self.stream.flush()
 
     def addError(self, test, err):
         super(TextTestResult, self).addError(test, err)
+        red, reset = self._ansi.RED, self._ansi.RESET
         if self.showAll:
-            self._write_status(test, "ERROR")
+            self._write_status(test, f"{red}ERROR{reset}")
         elif self.dots:
-            self.stream.write('E')
+            self.stream.write(f"{red}E{reset}")
             self.stream.flush()
 
     def addFailure(self, test, err):
         super(TextTestResult, self).addFailure(test, err)
+        red, reset = self._ansi.RED, self._ansi.RESET
         if self.showAll:
-            self._write_status(test, "FAIL")
+            self._write_status(test, f"{red}FAIL{reset}")
         elif self.dots:
-            self.stream.write('F')
+            self.stream.write(f"{red}F{reset}")
             self.stream.flush()
 
     def addSkip(self, test, reason):
         super(TextTestResult, self).addSkip(test, reason)
+        yellow, reset = self._ansi.YELLOW, self._ansi.RESET
         if self.showAll:
-            self._write_status(test, "skipped {0!r}".format(reason))
+            self._write_status(test, f"{yellow}skipped{reset} {reason!r}")
         elif self.dots:
-            self.stream.write("s")
+            self.stream.write(f"{yellow}s{reset}")
             self.stream.flush()
 
     def addExpectedFailure(self, test, err):
         super(TextTestResult, self).addExpectedFailure(test, err)
+        yellow, reset = self._ansi.YELLOW, self._ansi.RESET
         if self.showAll:
-            self.stream.writeln("expected failure")
+            self.stream.writeln(f"{yellow}expected failure{reset}")
             self.stream.flush()
         elif self.dots:
-            self.stream.write("x")
+            self.stream.write(f"{yellow}x{reset}")
             self.stream.flush()
 
     def addUnexpectedSuccess(self, test):
         super(TextTestResult, self).addUnexpectedSuccess(test)
+        red, reset = self._ansi.RED, self._ansi.RESET
         if self.showAll:
-            self.stream.writeln("unexpected success")
+            self.stream.writeln(f"{red}unexpected success{reset}")
             self.stream.flush()
         elif self.dots:
-            self.stream.write("u")
+            self.stream.write(f"{red}u{reset}")
             self.stream.flush()
 
     def printErrors(self):
+        bold_red = self._ansi.BOLD_RED
+        red = self._ansi.RED
+        reset = self._ansi.RESET
         if self.dots or self.showAll:
             self.stream.writeln()
             self.stream.flush()
-        self.printErrorList('ERROR', self.errors)
-        self.printErrorList('FAIL', self.failures)
-        unexpectedSuccesses = getattr(self, 'unexpectedSuccesses', ())
+        self.printErrorList(f"{red}ERROR{reset}", self.errors)
+        self.printErrorList(f"{red}FAIL{reset}", self.failures)
+        unexpectedSuccesses = getattr(self, "unexpectedSuccesses", ())
         if unexpectedSuccesses:
             self.stream.writeln(self.separator1)
             for test in unexpectedSuccesses:
-                self.stream.writeln(f"UNEXPECTED SUCCESS: {self.getDescription(test)}")
+                self.stream.writeln(
+                    f"{red}UNEXPECTED SUCCESS{bold_red}: "
+                    f"{self.getDescription(test)}{reset}"
+                )
             self.stream.flush()
 
     def printErrorList(self, flavour, errors):
+        bold_red, reset = self._ansi.BOLD_RED, self._ansi.RESET
         for test, err in errors:
             self.stream.writeln(self.separator1)
-            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
+            self.stream.writeln(
+                f"{flavour}{bold_red}: {self.getDescription(test)}{reset}"
+            )
             self.stream.writeln(self.separator2)
             self.stream.writeln("%s" % err)
             self.stream.flush()
@@ -232,7 +251,7 @@ class TextTestRunner(object):
             if self.warnings:
                 # if self.warnings is set, use it to filter all the warnings
                 warnings.simplefilter(self.warnings)
-            startTime = time.perf_counter()
+            start_time = time.perf_counter()
             startTestRun = getattr(result, 'startTestRun', None)
             if startTestRun is not None:
                 startTestRun()
@@ -242,8 +261,8 @@ class TextTestRunner(object):
                 stopTestRun = getattr(result, 'stopTestRun', None)
                 if stopTestRun is not None:
                     stopTestRun()
-            stopTime = time.perf_counter()
-        timeTaken = stopTime - startTime
+            stop_time = time.perf_counter()
+        time_taken = stop_time - start_time
         result.printErrors()
         if self.durations is not None:
             self._printDurations(result)
@@ -253,10 +272,10 @@ class TextTestRunner(object):
 
         run = result.testsRun
         self.stream.writeln("Ran %d test%s in %.3fs" %
-                            (run, run != 1 and "s" or "", timeTaken))
+                            (run, run != 1 and "s" or "", time_taken))
         self.stream.writeln()
 
-        expectedFails = unexpectedSuccesses = skipped = 0
+        expected_fails = unexpected_successes = skipped = 0
         try:
             results = map(len, (result.expectedFailures,
                                 result.unexpectedSuccesses,
@@ -264,26 +283,35 @@ class TextTestRunner(object):
         except AttributeError:
             pass
         else:
-            expectedFails, unexpectedSuccesses, skipped = results
+            expected_fails, unexpected_successes, skipped = results
 
         infos = []
+        ansi = get_colors()
+        bold_red = ansi.BOLD_RED
+        green = ansi.GREEN
+        red = ansi.RED
+        reset = ansi.RESET
+        yellow = ansi.YELLOW
+
         if not result.wasSuccessful():
-            self.stream.write("FAILED")
+            self.stream.write(f"{bold_red}FAILED{reset}")
             failed, errored = len(result.failures), len(result.errors)
             if failed:
-                infos.append("failures=%d" % failed)
+                infos.append(f"{bold_red}failures={failed}{reset}")
             if errored:
-                infos.append("errors=%d" % errored)
+                infos.append(f"{bold_red}errors={errored}{reset}")
         elif run == 0 and not skipped:
-            self.stream.write("NO TESTS RAN")
+            self.stream.write(f"{yellow}NO TESTS RAN{reset}")
         else:
-            self.stream.write("OK")
+            self.stream.write(f"{green}OK{reset}")
         if skipped:
-            infos.append("skipped=%d" % skipped)
-        if expectedFails:
-            infos.append("expected failures=%d" % expectedFails)
-        if unexpectedSuccesses:
-            infos.append("unexpected successes=%d" % unexpectedSuccesses)
+            infos.append(f"{yellow}skipped={skipped}{reset}")
+        if expected_fails:
+            infos.append(f"{yellow}expected failures={expected_fails}{reset}")
+        if unexpected_successes:
+            infos.append(
+                f"{red}unexpected successes={unexpected_successes}{reset}"
+            )
         if infos:
             self.stream.writeln(" (%s)" % (", ".join(infos),))
         else:
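
The runner.py changes above rely on get_colors() handing back an object whose colour attributes (RED, GREEN, YELLOW, BOLD_RED, RESET, ...) are ANSI escape sequences when colour is enabled and empty strings when it is not, so the same f-strings work in both modes. A simplified stand-in for that degrade-gracefully pattern (a sketch, not the real _colorize API) looks like:

import os
import sys

class _Ansi:
    RED = "\x1b[31m"
    GREEN = "\x1b[32m"
    YELLOW = "\x1b[33m"
    BOLD_RED = "\x1b[1;31m"
    RESET = "\x1b[0m"

class _NoColor:
    RED = GREEN = YELLOW = BOLD_RED = RESET = ""

def get_colors_sketch(stream=sys.stdout):
    # Return ANSI codes only for an interactive stream with colour allowed.
    if os.environ.get("NO_COLOR") or not stream.isatty():
        return _NoColor()
    return _Ansi()

ansi = get_colors_sketch()
print(f"{ansi.GREEN}OK{ansi.RESET}")  # plain "OK" when colour is off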
diff --git a/Misc/NEWS.d/next/Library/2024-11-23-00-17-29.gh-issue-127221.OSXdFE.rst b/Misc/NEWS.d/next/Library/2024-11-23-00-17-29.gh-issue-127221.OSXdFE.rst
new file mode 100644
index 0000000..0e4a03c
--- /dev/null
@@ -0,0 +1 @@
+Add colour to :mod:`unittest` output. Patch by Hugo van Kemenade.