]> git.ipfire.org Git - thirdparty/Python/cpython.git/commitdiff
gh-74028: add `buffersize` parameter to `concurrent.futures.Executor.map` for lazier...
authorEnzo Bonnal <bonnal.enzo.dev@gmail.com>
Thu, 13 Mar 2025 10:57:53 +0000 (10:57 +0000)
committerGitHub <noreply@github.com>
Thu, 13 Mar 2025 10:57:53 +0000 (11:57 +0100)
`concurrent.futures.Executor.map` now supports limiting the number of submitted
tasks whose results have not yet been yielded via the new `buffersize` parameter.

---------

Co-authored-by: Bénédikt Tran <10796600+picnixz@users.noreply.github.com>
Doc/library/concurrent.futures.rst
Doc/whatsnew/3.14.rst
Lib/concurrent/futures/_base.py
Lib/concurrent/futures/process.py
Lib/test/test_concurrent_futures/executor.py
Misc/NEWS.d/next/Library/2024-10-18-10-27-54.gh-issue-74028.4d4vVD.rst [new file with mode: 0644]

index dc613f2f8f00cd8a93e53c477041cf1104004a13..68d081001b6791d72ee5683cc62a44903c85263d 100644 (file)
@@ -40,11 +40,14 @@ Executor Objects
              future = executor.submit(pow, 323, 1235)
              print(future.result())
 
-   .. method:: map(fn, *iterables, timeout=None, chunksize=1)
+   .. method:: map(fn, *iterables, timeout=None, chunksize=1, buffersize=None)
 
       Similar to :func:`map(fn, *iterables) <map>` except:
 
-      * the *iterables* are collected immediately rather than lazily;
+      * The *iterables* are collected immediately rather than lazily, unless a
+        *buffersize* is specified to limit the number of submitted tasks whose
+        results have not yet been yielded. If the buffer is full, iteration over
+        the *iterables* pauses until a result is yielded from the buffer.
 
       * *fn* is executed asynchronously and several calls to
         *fn* may be made concurrently.
@@ -68,7 +71,10 @@ Executor Objects
       *chunksize* has no effect.
 
       .. versionchanged:: 3.5
-         Added the *chunksize* argument.
+         Added the *chunksize* parameter.
+
+      .. versionchanged:: next
+         Added the *buffersize* parameter.
 
    .. method:: shutdown(wait=True, *, cancel_futures=False)
 
index 6898b50b2c932a0ffe8bd77ef55079405b943d20..42a3bf02475b397b26e4bc47e2a03fbcaaa3e46f 100644 (file)
@@ -465,6 +465,13 @@ contextvars
 * Support context manager protocol by :class:`contextvars.Token`.
   (Contributed by Andrew Svetlov in :gh:`129889`.)
 
+* Add the optional ``buffersize`` parameter to
+  :meth:`concurrent.futures.Executor.map` to limit the number of submitted
+  tasks whose results have not yet been yielded. If the buffer is full,
+  iteration over the *iterables* pauses until a result is yielded from the
+  buffer.
+  (Contributed by Enzo Bonnal and Josh Rosenberg in :gh:`74028`.)
+
 
 ctypes
 ------
index 707fcdfde79acdb41aa00884ee0ebbb4eb90b887..d5ba39e3d7177482f411ff3f3132fd8fc40797d3 100644 (file)
@@ -8,6 +8,8 @@ import logging
 import threading
 import time
 import types
+import weakref
+from itertools import islice
 
 FIRST_COMPLETED = 'FIRST_COMPLETED'
 FIRST_EXCEPTION = 'FIRST_EXCEPTION'
@@ -572,7 +574,7 @@ class Executor(object):
         """
         raise NotImplementedError()
 
-    def map(self, fn, *iterables, timeout=None, chunksize=1):
+    def map(self, fn, *iterables, timeout=None, chunksize=1, buffersize=None):
         """Returns an iterator equivalent to map(fn, iter).
 
         Args:
@@ -584,6 +586,11 @@ class Executor(object):
                 before being passed to a child process. This argument is only
                 used by ProcessPoolExecutor; it is ignored by
                 ThreadPoolExecutor.
+            buffersize: The maximum number of submitted tasks whose results
+                have not yet been yielded. If the buffer is full, iteration
+                over the iterables pauses until a result is yielded from the
+                buffer. If None, all input elements are eagerly collected,
+                and a task is submitted for each.
 
         Returns:
             An iterator equivalent to: map(func, *iterables) but the calls may
@@ -594,10 +601,25 @@ class Executor(object):
                 before the given timeout.
             Exception: If fn(*args) raises for any values.
         """
+        if buffersize is not None and not isinstance(buffersize, int):
+            raise TypeError("buffersize must be an integer or None")
+        if buffersize is not None and buffersize < 1:
+            raise ValueError("buffersize must be None or > 0")
+
         if timeout is not None:
             end_time = timeout + time.monotonic()
 
-        fs = [self.submit(fn, *args) for args in zip(*iterables)]
+        zipped_iterables = zip(*iterables)
+        if buffersize:
+            fs = collections.deque(
+                self.submit(fn, *args) for args in islice(zipped_iterables, buffersize)
+            )
+        else:
+            fs = [self.submit(fn, *args) for args in zipped_iterables]
+
+        # Use a weak reference to ensure that the executor can be garbage
+        # collected independently of the result_iterator closure.
+        executor_weakref = weakref.ref(self)
 
         # Yield must be hidden in closure so that the futures are submitted
         # before the first iterator value is required.
@@ -606,6 +628,12 @@ class Executor(object):
                 # reverse to keep finishing order
                 fs.reverse()
                 while fs:
+                    if (
+                        buffersize
+                        and (executor := executor_weakref())
+                        and (args := next(zipped_iterables, None))
+                    ):
+                        fs.appendleft(executor.submit(fn, *args))
                     # Careful not to keep a reference to the popped future
                     if timeout is None:
                         yield _result_or_cancel(fs.pop())
index d79d6b959c90d3863c774388d45ca492e4344eaa..4847550908adab7de81ca9b67b66a38402b5a5fb 100644 (file)
@@ -813,7 +813,7 @@ class ProcessPoolExecutor(_base.Executor):
             return f
     submit.__doc__ = _base.Executor.submit.__doc__
 
-    def map(self, fn, *iterables, timeout=None, chunksize=1):
+    def map(self, fn, *iterables, timeout=None, chunksize=1, buffersize=None):
         """Returns an iterator equivalent to map(fn, iter).
 
         Args:
@@ -824,6 +824,11 @@ class ProcessPoolExecutor(_base.Executor):
             chunksize: If greater than one, the iterables will be chopped into
                 chunks of size chunksize and submitted to the process pool.
                 If set to one, the items in the list will be sent one at a time.
+            buffersize: The maximum number of submitted tasks whose results
+                have not yet been yielded. If the buffer is full, iteration
+                over the iterables pauses until a result is yielded from the
+                buffer. If None, all input elements are eagerly collected,
+                and a task is submitted for each.
 
         Returns:
             An iterator equivalent to: map(func, *iterables) but the calls may
@@ -839,7 +844,8 @@ class ProcessPoolExecutor(_base.Executor):
 
         results = super().map(partial(_process_chunk, fn),
                               itertools.batched(zip(*iterables), chunksize),
-                              timeout=timeout)
+                              timeout=timeout,
+                              buffersize=buffersize)
         return _chain_from_iterable_of_lists(results)
 
     def shutdown(self, wait=True, *, cancel_futures=False):
index 0221c28b3ceb8b729eb1a4acc1ae19252bb1013b..d88c34d1c8c8e4d14109fc35a821aaa72d886b33 100644 (file)
@@ -1,7 +1,9 @@
+import itertools
 import threading
 import time
 import weakref
 from concurrent import futures
+from operator import add
 from test import support
 from test.support import Py_GIL_DISABLED
 
@@ -73,6 +75,74 @@ class ExecutorTest:
         # take longer than the specified timeout.
         self.assertIn(results, ([None, None], [None], []))
 
+    def test_map_buffersize_type_validation(self):
+        for buffersize in ("foo", 2.0):
+            with self.subTest(buffersize=buffersize):
+                with self.assertRaisesRegex(
+                    TypeError,
+                    "buffersize must be an integer or None",
+                ):
+                    self.executor.map(str, range(4), buffersize=buffersize)
+
+    def test_map_buffersize_value_validation(self):
+        for buffersize in (0, -1):
+            with self.subTest(buffersize=buffersize):
+                with self.assertRaisesRegex(
+                    ValueError,
+                    "buffersize must be None or > 0",
+                ):
+                    self.executor.map(str, range(4), buffersize=buffersize)
+
+    def test_map_buffersize(self):
+        ints = range(4)
+        for buffersize in (1, 2, len(ints), len(ints) * 2):
+            with self.subTest(buffersize=buffersize):
+                res = self.executor.map(str, ints, buffersize=buffersize)
+                self.assertListEqual(list(res), ["0", "1", "2", "3"])
+
+    def test_map_buffersize_on_multiple_iterables(self):
+        ints = range(4)
+        for buffersize in (1, 2, len(ints), len(ints) * 2):
+            with self.subTest(buffersize=buffersize):
+                res = self.executor.map(add, ints, ints, buffersize=buffersize)
+                self.assertListEqual(list(res), [0, 2, 4, 6])
+
+    def test_map_buffersize_on_infinite_iterable(self):
+        res = self.executor.map(str, itertools.count(), buffersize=2)
+        self.assertEqual(next(res, None), "0")
+        self.assertEqual(next(res, None), "1")
+        self.assertEqual(next(res, None), "2")
+
+    def test_map_buffersize_on_multiple_infinite_iterables(self):
+        res = self.executor.map(
+            add,
+            itertools.count(),
+            itertools.count(),
+            buffersize=2
+        )
+        self.assertEqual(next(res, None), 0)
+        self.assertEqual(next(res, None), 2)
+        self.assertEqual(next(res, None), 4)
+
+    def test_map_buffersize_on_empty_iterable(self):
+        res = self.executor.map(str, [], buffersize=2)
+        self.assertIsNone(next(res, None))
+
+    def test_map_buffersize_without_iterable(self):
+        res = self.executor.map(str, buffersize=2)
+        self.assertIsNone(next(res, None))
+
+    def test_map_buffersize_when_buffer_is_full(self):
+        ints = iter(range(4))
+        buffersize = 2
+        self.executor.map(str, ints, buffersize=buffersize)
+        self.executor.shutdown(wait=True)  # wait for tasks to complete
+        self.assertEqual(
+            next(ints),
+            buffersize,
+            msg="should have fetched only `buffersize` elements from `ints`.",
+        )
+
     def test_shutdown_race_issue12456(self):
         # Issue #12456: race condition at shutdown where trying to post a
         # sentinel in the call queue blocks (the queue is full while processes
diff --git a/Misc/NEWS.d/next/Library/2024-10-18-10-27-54.gh-issue-74028.4d4vVD.rst b/Misc/NEWS.d/next/Library/2024-10-18-10-27-54.gh-issue-74028.4d4vVD.rst
new file mode 100644 (file)
index 0000000..6760e2b
--- /dev/null
@@ -0,0 +1,4 @@
+Add the optional ``buffersize`` parameter to
+:meth:`concurrent.futures.Executor.map` to limit the number of submitted tasks
+whose results have not yet been yielded. If the buffer is full, iteration over
+the *iterables* pauses until a result is yielded from the buffer.