From 37cf93f9db8f34ede1d9d3e0d61fb6031f4025d9 Mon Sep 17 00:00:00 2001
From: sumau
Date: Sat, 19 Oct 2019 23:03:31 +0100
Subject: [PATCH] Fixes: #4914 Amend BufferedRowResultProxy to handle
 max_row_buffer > 1000

---
 lib/sqlalchemy/engine/result.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 733bd6f6ab..100004c167 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1486,9 +1486,8 @@ class BufferedRowResultProxy(ResultProxy):
 
     The pre-fetching behavior fetches only one row initially, and then
     grows its buffer size by a fixed amount with each successive need
-    for additional rows up to a size of 1000.
-
-    The size argument is configurable using the ``max_row_buffer``
+    for additional rows up to a size of 1000, after which the buffer
+    size increases to the value configured using the ``max_row_buffer``
     execution option::
 
         with psycopg2_engine.connect() as conn:
@@ -1508,13 +1507,14 @@ class BufferedRowResultProxy(ResultProxy):
         self._max_row_buffer = self.context.execution_options.get(
             "max_row_buffer", None
         )
+        self.__update_size_growth()
         self.__buffer_rows()
         super(BufferedRowResultProxy, self)._init_metadata()
 
     # this is a "growth chart" for the buffering of rows.
     # each successive __buffer_rows call will use the next
     # value in the list for the buffer size until the max
-    # is reached
+    # or 1000 is reached
     size_growth = {
         1: 5,
         5: 10,
@@ -1526,6 +1526,15 @@ class BufferedRowResultProxy(ResultProxy):
         500: 1000,
     }
 
+    # extend the growth chart when max_row_buffer is configured above
+    # the default 1000-row ceiling.  the chart is copied onto the
+    # instance first, so the class-level size_growth dict shared by
+    # every result proxy is never mutated by one statement's
+    # execution options.
+    def __update_size_growth(self):
+        if self._max_row_buffer is not None and self._max_row_buffer > 1000:
+            self.size_growth = dict(self.size_growth)
+            self.size_growth[1000] = self._max_row_buffer
+
     def __buffer_rows(self):
         if self.cursor is None:
             return
-- 
2.47.3