git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
- Fixed bug affecting the case where a pool checkout event handler is in use
author Mike Bayer <mike_mp@zzzcomputing.com>
Thu, 14 May 2015 17:59:04 +0000 (13:59 -0400)
committer Mike Bayer <mike_mp@zzzcomputing.com>
Thu, 14 May 2015 17:59:04 +0000 (13:59 -0400)
and the database can no longer be connected to: the checkout handler's
failure is caught and the attempt to re-acquire the connection also
raises an exception, but the underlying connection record was not
checked back in before that exception propagated outwards.  As a
result, the checked-out record would not release itself until the
traceback referring to it was garbage collected, preventing the record
from being used for a new checkout until the traceback went out of
scope.  This is particularly confusing when the number of such
tracebacks held in memory exceeds the number of connections the pool
can return, as the pool begins to raise errors that no more checkouts
are available rather than attempting to connect again.  The fix checks
the record back in before re-raising.
fixes #3419
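
A hedged illustration of the scenario, not part of this commit: a typical
"checkout" event handler pings the DBAPI connection and raises
DisconnectionError when the database is unreachable, which tells the pool to
invalidate the record and retry the checkout.  Before this fix, a connect
failure during that retry left the record checked out until its traceback was
garbage collected.  The engine URL, pool sizing and ping statement below are
placeholders.

    # hypothetical reproduction sketch; URL and ping statement are placeholders
    import sqlalchemy as sa
    from sqlalchemy import event, exc

    engine = sa.create_engine(
        "postgresql://scott:tiger@localhost/test",
        pool_size=1, max_overflow=2)

    @event.listens_for(engine, "checkout")
    def ping_connection(dbapi_connection, connection_record, connection_proxy):
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute("SELECT 1")
        except Exception:
            # signals the pool to invalidate this record and retry the
            # checkout; if that retry's connect also fails, the fix checks
            # the record back in before the error propagates.
            raise exc.DisconnectionError()
        finally:
            cursor.close()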

doc/build/changelog/changelog_10.rst
lib/sqlalchemy/pool.py
test/engine/test_pool.py

index de9f1e0883de041f9b1aa47f8b3bb223fa11ea64..4e42765613b2c101a5aaa92b48c1e18d7eabdb8e 100644 (file)
 .. changelog::
     :version: 1.0.5
 
+    .. change::
+        :tags: bug, pool
+        :tickets: 3419
+
+        Fixed bug affecting the case where a pool checkout event handler
+        is in use and the database can no longer be connected to: the
+        checkout handler's failure is caught and the attempt to re-acquire
+        the connection also raises an exception, but the underlying
+        connection record was not checked back in before that exception
+        propagated outwards.  As a result, the checked-out record would not
+        release itself until the traceback referring to it was garbage
+        collected, preventing the record from being used for a new checkout
+        until the traceback went out of scope.  This is particularly
+        confusing when the number of such tracebacks held in memory exceeds
+        the number of connections the pool can return, as the pool begins
+        to raise errors that no more checkouts are available rather than
+        attempting to connect again.  The fix checks the record back in
+        before re-raising.
+
+
 .. changelog::
     :version: 1.0.4
     :released: May 7, 2015
index 0a4cdadc91bf70588d4e12ef496b149314b942d7..b38aefb3dae42ea44225cabc3ae1e53592f1eb1d 100644 (file)
@@ -732,7 +732,13 @@ class _ConnectionFairy(object):
                 pool.logger.info(
                     "Disconnection detected on checkout: %s", e)
                 fairy._connection_record.invalidate(e)
-                fairy.connection = fairy._connection_record.get_connection()
+                try:
+                    fairy.connection = \
+                        fairy._connection_record.get_connection()
+                except:
+                    with util.safe_reraise():
+                        fairy._connection_record.checkin()
+
                 attempts -= 1
 
         pool.logger.info("Reconnection attempts exhausted on checkout")
index 3684fc3e665892d6fed038a712e26c2f77a7a3c0..451cb8b0eb265ec01f1fa83a64e59f659501597c 100644 (file)
@@ -26,10 +26,12 @@ def MockDBAPI():
             db.connect = Mock(side_effect=Exception("connect failed"))
         else:
             db.connect = Mock(side_effect=connect)
+        db.is_shutdown = value
 
     db = Mock(
         connect=Mock(side_effect=connect),
-        shutdown=shutdown, _shutdown=False)
+        shutdown=shutdown,
+        is_shutdown=False)
     return db
 
 
@@ -1422,30 +1424,45 @@ class QueuePoolTest(PoolTestBase):
         assert c3._connection_record is c2_rec
         assert c2_rec.connection is c3.connection
 
+    def _no_wr_finalize(self):
+        finalize_fairy = pool._finalize_fairy
+
+        def assert_no_wr_callback(
+            connection, connection_record,
+                pool, ref, echo, fairy=None):
+            if fairy is None:
+                raise AssertionError(
+                    "finalize fairy was called as a weakref callback")
+            return finalize_fairy(
+                connection, connection_record, pool, ref, echo, fairy)
+        return patch.object(
+            pool, '_finalize_fairy', assert_no_wr_callback)
 
     def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
         # p is QueuePool with size=1, max_overflow=2,
         # and one connection in the pool that will need to
         # reconnect when next used (either due to recycle or invalidate)
-        eq_(p.checkedout(), 0)
-        eq_(p._overflow, 0)
-        dbapi.shutdown(True)
-        assert_raises(
-            Exception,
-            p.connect
-        )
-        eq_(p._overflow, 0)
-        eq_(p.checkedout(), 0)  # and not 1
 
-        dbapi.shutdown(False)
+        with self._no_wr_finalize():
+            eq_(p.checkedout(), 0)
+            eq_(p._overflow, 0)
+            dbapi.shutdown(True)
+            assert_raises(
+                Exception,
+                p.connect
+            )
+            eq_(p._overflow, 0)
+            eq_(p.checkedout(), 0)  # and not 1
 
-        c1 = self._with_teardown(p.connect())
-        assert p._pool.empty()  # poolsize is one, so we're empty OK
-        c2 = self._with_teardown(p.connect())
-        eq_(p._overflow, 1)  # and not 2
+            dbapi.shutdown(False)
 
-        # this hangs if p._overflow is 2
-        c3 = self._with_teardown(p.connect())
+            c1 = self._with_teardown(p.connect())
+            assert p._pool.empty()  # poolsize is one, so we're empty OK
+            c2 = self._with_teardown(p.connect())
+            eq_(p._overflow, 1)  # and not 2
+
+            # this hangs if p._overflow is 2
+            c3 = self._with_teardown(p.connect())
 
     def test_error_on_pooled_reconnect_cleanup_invalidate(self):
         dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
@@ -1463,6 +1480,20 @@ class QueuePoolTest(PoolTestBase):
         time.sleep(1.5)
         self._assert_cleanup_on_pooled_reconnect(dbapi, p)
 
+    def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
+        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
+                                        max_overflow=2)
+
+        c1 = p.connect()
+        c1.close()
+
+        @event.listens_for(p, "checkout")
+        def handle_checkout_event(dbapi_con, con_record, con_proxy):
+            if dbapi.is_shutdown:
+                raise tsa.exc.DisconnectionError()
+
+        self._assert_cleanup_on_pooled_reconnect(dbapi, p)
+
     @testing.requires.timing_intensive
     def test_recycle_pool_no_race(self):
         def slow_close():