gh-117225: doctest: only print "and X failed" when non-zero, don't pluralise "1 items"
author     Hugo van Kemenade <1324225+hugovk@users.noreply.github.com>
           Wed, 27 Mar 2024 14:46:35 +0000 (16:46 +0200)
committer  GitHub <noreply@github.com>
           Wed, 27 Mar 2024 14:46:35 +0000 (16:46 +0200)
Doc/library/doctest.rst
Lib/doctest.py
Lib/test/test_doctest/test_doctest.py
Misc/NEWS.d/next/Library/2024-03-25-21-15-56.gh-issue-117225.oOaZXb.rst [new file with mode: 0644]

diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst
index 835a3a76806148768a924fcee87332735c944581..135758187894ec4726db93125f6f6c8d44c6e00a 100644 (file)
@@ -123,10 +123,10 @@ And so on, eventually ending with:
        OverflowError: n too large
    ok
    2 items passed all tests:
-      1 tests in __main__
-      8 tests in __main__.factorial
-   9 tests in 2 items.
-   9 passed and 0 failed.
+      1 test in __main__
+      6 tests in __main__.factorial
+   7 tests in 2 items.
+   7 passed.
    Test passed.
    $
 
@@ -1933,7 +1933,7 @@ such a test runner::
                                            optionflags=flags)
         else:
             fail, total = doctest.testmod(optionflags=flags)
-            print("{} failures out of {} tests".format(fail, total))
+            print(f"{fail} failures out of {total} tests")
 
 
 .. rubric:: Footnotes
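
The documentation hunk above only modernises the formatting call in the example
test runner. For context, a minimal runnable script in the same spirit (the
double() function and the flag choice are illustrative assumptions, not quoted
from the docs; REPORT_NDIFF and FAIL_FAST are real doctest option flags):

    import doctest

    def double(n):
        """
        >>> double(2)
        4
        """
        return 2 * n

    if __name__ == "__main__":
        flags = doctest.REPORT_NDIFF | doctest.FAIL_FAST
        fail, total = doctest.testmod(optionflags=flags)
        # Mirrors the updated doc example: an f-string instead of str.format().
        print(f"{fail} failures out of {total} tests")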
diff --git a/Lib/doctest.py b/Lib/doctest.py
index 6049423b5147a5eb46f7934569c5c910b6dedb25..7a9f4e40d814d6281de5956b0591aeff61c95b07 100644 (file)
@@ -1191,9 +1191,9 @@ class DocTestRunner:
            2 tests in _TestClass
            2 tests in _TestClass.__init__
            2 tests in _TestClass.get
-           1 tests in _TestClass.square
+           1 test in _TestClass.square
         7 tests in 4 items.
-        7 passed and 0 failed.
+        7 passed.
         Test passed.
         TestResults(failed=0, attempted=7)
 
@@ -1568,49 +1568,59 @@ class DocTestRunner:
         """
         if verbose is None:
             verbose = self._verbose
-        notests = []
-        passed = []
-        failed = []
+
+        notests, passed, failed = [], [], []
         total_tries = total_failures = total_skips = 0
-        for item in self._stats.items():
-            name, (failures, tries, skips) = item
+
+        for name, (failures, tries, skips) in self._stats.items():
             assert failures <= tries
             total_tries += tries
             total_failures += failures
             total_skips += skips
+
             if tries == 0:
                 notests.append(name)
             elif failures == 0:
                 passed.append((name, tries))
             else:
-                failed.append(item)
+                failed.append((name, (failures, tries, skips)))
+
         if verbose:
             if notests:
-                print(f"{len(notests)} items had no tests:")
+                print(f"{_n_items(notests)} had no tests:")
                 notests.sort()
                 for name in notests:
                     print(f"    {name}")
+
             if passed:
-                print(f"{len(passed)} items passed all tests:")
-                passed.sort()
-                for name, count in passed:
-                    print(f" {count:3d} tests in {name}")
+                print(f"{_n_items(passed)} passed all tests:")
+                for name, count in sorted(passed):
+                    s = "" if count == 1 else "s"
+                    print(f" {count:3d} test{s} in {name}")
+
         if failed:
             print(self.DIVIDER)
-            print(f"{len(failed)} items had failures:")
-            failed.sort()
-            for name, (failures, tries, skips) in failed:
+            print(f"{_n_items(failed)} had failures:")
+            for name, (failures, tries, skips) in sorted(failed):
                 print(f" {failures:3d} of {tries:3d} in {name}")
+
         if verbose:
-            print(f"{total_tries} tests in {len(self._stats)} items.")
-            print(f"{total_tries - total_failures} passed and {total_failures} failed.")
+            s = "" if total_tries == 1 else "s"
+            print(f"{total_tries} test{s} in {_n_items(self._stats)}.")
+
+            and_f = f" and {total_failures} failed" if total_failures else ""
+            print(f"{total_tries - total_failures} passed{and_f}.")
+
         if total_failures:
-            msg = f"***Test Failed*** {total_failures} failures"
+            s = "" if total_failures == 1 else "s"
+            msg = f"***Test Failed*** {total_failures} failure{s}"
             if total_skips:
-                msg = f"{msg} and {total_skips} skipped tests"
+                s = "" if total_skips == 1 else "s"
+                msg = f"{msg} and {total_skips} skipped test{s}"
             print(f"{msg}.")
         elif verbose:
             print("Test passed.")
+
         return TestResults(total_failures, total_tries, skipped=total_skips)
 
     #/////////////////////////////////////////////////////////////////
@@ -1627,6 +1637,15 @@ class DocTestRunner:
             d[name] = (failures, tries, skips)
 
 
+def _n_items(items: list) -> str:
+    """
+    Helper to pluralise the number of items in a list.
+    """
+    n = len(items)
+    s = "" if n == 1 else "s"
+    return f"{n} item{s}"
+
+
 class OutputChecker:
     """
     A class used to check the whether the actual output from a doctest
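
The new _n_items helper introduced above is self-contained, so its behaviour is
easy to sandbox outside the module. A standalone copy for experimentation
(renamed n_items to make clear it is not an import of the private helper):

    def n_items(items) -> str:
        """Pluralise the number of items in a collection, as _n_items does."""
        n = len(items)
        s = "" if n == 1 else "s"
        return f"{n} item{s}"

    assert n_items(["a"]) == "1 item"
    assert n_items(["a", "b"]) == "2 items"
    assert n_items({}) == "0 items"

Because it only calls len(), it accepts the dict self._stats as readily as the
lists notests, passed and failed, which is how summarize() uses it above.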
diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py
index 43be200b983227bd1886719a266049b9cb49dc57..3e883c56f6c766c854fc434e13eefd0c9ef7ac0a 100644 (file)
@@ -2628,9 +2628,9 @@ We don't want `-v` in sys.argv for these tests.
         ...
         NameError: name 'favorite_color' is not defined
     **********************************************************************
-    1 items had failures:
+    1 item had failures:
        1 of   2 in test_doctest.txt
-    ***Test Failed*** 1 failures.
+    ***Test Failed*** 1 failure.
     TestResults(failed=1, attempted=2)
     >>> doctest.master = None  # Reset master.
 
@@ -2657,9 +2657,9 @@ Globals may be specified with the `globs` and `extraglobs` parameters:
     Got:
         'red'
     **********************************************************************
-    1 items had failures:
+    1 item had failures:
        1 of   2 in test_doctest.txt
-    ***Test Failed*** 1 failures.
+    ***Test Failed*** 1 failure.
     TestResults(failed=1, attempted=2)
     >>> doctest.master = None  # Reset master.
 
@@ -2689,10 +2689,10 @@ Verbosity can be increased with the optional `verbose` parameter:
         <BLANKLINE>
         b
     ok
-    1 items passed all tests:
+    1 item passed all tests:
        2 tests in test_doctest.txt
-    2 tests in 1 items.
-    2 passed and 0 failed.
+    2 tests in 1 item.
+    2 passed.
     Test passed.
     TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
@@ -2749,7 +2749,7 @@ using the optional keyword argument `encoding`:
     **********************************************************************
     ...
     **********************************************************************
-    1 items had failures:
+    1 item had failures:
        2 of   2 in test_doctest4.txt
     ***Test Failed*** 2 failures.
     TestResults(failed=2, attempted=2)
@@ -2772,10 +2772,10 @@ Test the verbose output:
     Expecting:
         'b\u0105r'
     ok
-    1 items passed all tests:
+    1 item passed all tests:
        2 tests in test_doctest4.txt
-    2 tests in 1 items.
-    2 passed and 0 failed.
+    2 tests in 1 item.
+    2 passed.
     Test passed.
     TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
@@ -2997,10 +2997,10 @@ With the verbose flag, we should see the test output, but no error output:
     Expecting:
         'a'
     ok
-    1 items passed all tests:
+    1 item passed all tests:
        2 tests in myfile.doc
-    2 tests in 1 items.
-    2 passed and 0 failed.
+    2 tests in 1 item.
+    2 passed.
     Test passed.
 
 Now we'll write a couple files, one with three tests, the other a python module
@@ -3074,7 +3074,7 @@ not stderr:
     Got:
         'ajkml'
     **********************************************************************
-    1 items had failures:
+    1 item had failures:
        2 of   3 in myfile.doc
     ***Test Failed*** 2 failures.
 
@@ -3101,9 +3101,9 @@ The fourth run uses FAIL_FAST, so we should see only one error:
     Got:
         'abcdef'
     **********************************************************************
-    1 items had failures:
+    1 item had failures:
        1 of   2 in myfile.doc
-    ***Test Failed*** 1 failures.
+    ***Test Failed*** 1 failure.
 
 The fifth test uses verbose with the two options, so we should get verbose
 success output for the tests in both files:
@@ -3126,10 +3126,10 @@ success output for the tests in both files:
     Expecting:
         'a...l'
     ok
-    1 items passed all tests:
+    1 item passed all tests:
        3 tests in myfile.doc
-    3 tests in 1 items.
-    3 passed and 0 failed.
+    3 tests in 1 item.
+    3 passed.
     Test passed.
     Trying:
         1 + 1
@@ -3141,12 +3141,12 @@ success output for the tests in both files:
     Expecting:
         'abc def'
     ok
-    1 items had no tests:
+    1 item had no tests:
         myfile2
-    1 items passed all tests:
+    1 item passed all tests:
        2 tests in myfile2.test_func
     2 tests in 2 items.
-    2 passed and 0 failed.
+    2 passed.
     Test passed.
 
 We should also check some typical error cases.
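
The expectation changes above all exercise the failing-path wording. To
reproduce it locally, a throwaway module works (the file name and examples
here are illustrative):

    # fail_demo.py -- hypothetical module with one passing and one failing example.
    """
    >>> 2 + 2
    4
    >>> 2 + 2
    5
    """
    import doctest

    if __name__ == "__main__":
        # With this change the summary reads "1 item had failures:" and
        # "***Test Failed*** 1 failure." (both singular), matching the
        # updated expectations in test_doctest.py.
        doctest.testmod()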
diff --git a/Misc/NEWS.d/next/Library/2024-03-25-21-15-56.gh-issue-117225.oOaZXb.rst b/Misc/NEWS.d/next/Library/2024-03-25-21-15-56.gh-issue-117225.oOaZXb.rst
new file mode 100644 (file)
index 0000000..b6c4850
--- /dev/null
@@ -0,0 +1,2 @@
+doctest: only print "and X failed" when non-zero, don't pluralise "1 items".
+Patch by Hugo van Kemenade.
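
As a quick check of the passing path, running a trivially correct module
verbosely shows the other half of the change (the module name is illustrative):

    # pass_demo.py -- hypothetical module whose single example passes.
    """
    >>> 1 + 1
    2
    """
    import doctest

    if __name__ == "__main__":
        # verbose=True prints the summary; it now ends with "1 passed."
        # instead of "1 passed and 0 failed.", and pluralisation follows
        # the counts ("1 item", "1 test").
        doctest.testmod(verbose=True)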