git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
author    Mike Bayer <mike_mp@zzzcomputing.com>
          Wed, 22 Jul 2015 21:59:34 +0000 (17:59 -0400)
committer Mike Bayer <mike_mp@zzzcomputing.com>
          Wed, 22 Jul 2015 22:08:37 +0000 (18:08 -0400)

- Fixed critical issue whereby the pool "checkout" event handler
  may be called against a stale connection without the "connect"
  event handler having been called, in the case where the pool
  attempted to reconnect after being invalidated and failed; the stale
  connection would remain present and would be used on a subsequent
  attempt.  This issue has a greater impact in the 1.0 series subsequent
  to 1.0.2, as it also delivers a blanked-out ``.info`` dictionary to
  the event handler; prior to 1.0.2 the ``.info`` dictionary is still
  the previous one.

fixes #3497

(cherry picked from commit 4096ad0f0980f6940be57aaee85791df8f975cd7)

doc/build/changelog/changelog_09.rst
lib/sqlalchemy/__init__.py
lib/sqlalchemy/pool.py
lib/sqlalchemy/testing/mock.py
test/engine/test_pool.py

index 055dea80d44840f5d0477c8170ab40ae6707af3b..9bbadee65ce3376a3220c8850daddf48af1532a1 100644 (file)
     .. include:: changelog_07.rst
         :start-line: 5
 
+.. changelog::
+    :version: 0.9.11
+
+    .. change::
+        :tags: bug, pool
+        :tickets: 3497
+        :versions: 1.0.8
+
+        Fixed critical issue whereby the pool "checkout" event handler
+        may be called against a stale connection without the "connect"
+        event handler having been called, in the case where the pool
+        attempted to reconnect after being invalidated and failed; the stale
+        connection would remain present and would be used on a subsequent
+        attempt.  This issue has a greater impact in the 1.0 series subsequent
+        to 1.0.2, as it also delivers a blanked-out ``.info`` dictionary to
+        the event handler; prior to 1.0.2 the ``.info`` dictionary is still
+        the previous one.
+
 .. changelog::
     :version: 0.9.10
     :released: July 22, 2015
index e6d589f6de92c62c032c6f8a81eeba6d9e377d84..0f65e31883f5994d70ff4f5a0e19dc84c7834a94 100644 (file)
@@ -117,7 +117,7 @@ from .schema import (
 from .inspection import inspect
 from .engine import create_engine, engine_from_config
 
-__version__ = '0.9.10'
+__version__ = '0.9.11'
 
 
 def __go(lcls):
index 54cc13c75a3408001a39c6c4f0c57c038fe1975d..0701402346d0e21f1cbfdfdb45573d7547002874 100644 (file)
@@ -527,6 +527,9 @@ class _ConnectionRecord(object):
 
         if recycle:
             self.__close()
+            # ensure that if self.__connect() fails,
+            # we are not referring to the previous stale connection here
+            self.connection = None
             self.connection = self.__connect()
             self.info.clear()
             if self.__pool.dispatch.connect:
index be83693cc9acfdf17bd350e7167f74723fe9d4bd..c836bb407b4c2cc39ae47e04bf6a13f18abe7672 100644 (file)
@@ -11,10 +11,10 @@ from __future__ import absolute_import
 from ..util import py33
 
 if py33:
-    from unittest.mock import MagicMock, Mock, call, patch
+    from unittest.mock import MagicMock, Mock, call, patch, ANY
 else:
     try:
-        from mock import MagicMock, Mock, call, patch
+        from mock import MagicMock, Mock, call, patch, ANY
     except ImportError:
         raise ImportError(
             "SQLAlchemy's test suite requires the "
index e84f8803a434f30f27f6e9da8b3c133b9125df40..6fd25290d17bf802689656ef5aea499f9cd467b3 100644 (file)
@@ -8,8 +8,9 @@ from sqlalchemy.testing import eq_, assert_raises, is_not_
 from sqlalchemy.testing.engines import testing_engine
 from sqlalchemy.testing import fixtures
 import random
-from sqlalchemy.testing.mock import Mock, call
+from sqlalchemy.testing.mock import Mock, call, ANY
 import weakref
+import collections
 
 join_timeout = 10
 
@@ -1362,6 +1363,97 @@ class QueuePoolTest(PoolTestBase):
         time.sleep(1)
         self._assert_cleanup_on_pooled_reconnect(dbapi, p)
 
+    def test_connect_handler_not_called_for_recycled(self):
+        """test [ticket:3497]"""
+
+        dbapi, p = self._queuepool_dbapi_fixture(
+            pool_size=2, max_overflow=2)
+
+        canary = Mock()
+
+        c1 = p.connect()
+        c2 = p.connect()
+
+        c1.close()
+        c2.close()
+
+        dbapi.shutdown(True)
+
+        bad = p.connect()
+        p._invalidate(bad)
+        bad.close()
+        assert p._invalidate_time
+
+        event.listen(p, "connect", canary.connect)
+        event.listen(p, "checkout", canary.checkout)
+
+        assert_raises(
+            Exception,
+            p.connect
+        )
+
+        p._pool.queue = collections.deque(
+            [
+                c for c in p._pool.queue
+                if c.connection is not None
+            ]
+        )
+
+        dbapi.shutdown(False)
+        c = p.connect()
+        c.close()
+
+        eq_(
+            canary.mock_calls,
+            [
+                call.connect(ANY, ANY),
+                call.checkout(ANY, ANY, ANY)
+            ]
+        )
+
+    def test_connect_checkout_handler_always_gets_info(self):
+        """test [ticket:3497]"""
+
+        dbapi, p = self._queuepool_dbapi_fixture(
+            pool_size=2, max_overflow=2)
+
+        c1 = p.connect()
+        c2 = p.connect()
+
+        c1.close()
+        c2.close()
+
+        dbapi.shutdown(True)
+
+        bad = p.connect()
+        p._invalidate(bad)
+        bad.close()
+        assert p._invalidate_time
+
+        @event.listens_for(p, "connect")
+        def connect(conn, conn_rec):
+            conn_rec.info['x'] = True
+
+        @event.listens_for(p, "checkout")
+        def checkout(conn, conn_rec, conn_f):
+            assert 'x' in conn_rec.info
+
+        assert_raises(
+            Exception,
+            p.connect
+        )
+
+        p._pool.queue = collections.deque(
+            [
+                c for c in p._pool.queue
+                if c.connection is not None
+            ]
+        )
+
+        dbapi.shutdown(False)
+        c = p.connect()
+        c.close()
+
     def test_recycle_pool_no_race(self):
         def slow_close():
             slow_closing_connection._slow_close()