- server side cursor support fully functional in postgres
[ticket:514].
- improved framework for auto-invalidation of connections that have
- lost their underlying database - the error catching/invalidate
- step is totally moved to the connection pool. #516
+ lost their underlying database, via dialect-specific detection
+ of exceptions corresponding to that database's disconnect
+ related error messages. Additionally, when a "connection no
+ longer open" condition is detected, the entire connection pool
+ is discarded and replaced with a new instance. #516
- sql:
- preliminary support for unicode table names, column names and
SQL statements added, for databases which can support them.
def create_connection(self):
return _ConnectionRecord(self)
+
+ def recreate(self):
+ """return a new instance of this Pool's class with identical creation arguments."""
+ raise NotImplementedError()
+ def dispose(self):
+ """dispose of this pool.
+
+ this method leaves the possibility of checked-out connections remaining open,
+ so it is advised not to reuse the pool once dispose() is called, and to instead
+ use a new pool constructed by the recreate() method.
+ """
+ raise NotImplementedError()
+
def connect(self):
if not self._use_threadlocal:
return _ConnectionFairy(self).checkout()
def log(self, msg):
self.logger.info(msg)
- def dispose(self):
- raise NotImplementedError()
-
class _ConnectionRecord(object):
def __init__(self, pool):
self.__pool = pool
self.connection = self.__connect()
def close(self):
- self.__pool.log("Closing connection %s" % repr(self.connection))
- self.connection.close()
+ if self.connection is not None:
+ self.__pool.log("Closing connection %s" % repr(self.connection))
+ self.connection.close()
def invalidate(self, e=None):
if e is not None:
self._conns = {}
self.size = pool_size
+ def recreate(self):
+ self.log("Pool recreating")
+ return SingletonThreadPool(self._creator, pool_size=self.size, recycle=self._recycle, echo=self.echo, use_threadlocal=self._use_threadlocal, auto_close_cursors=self.auto_close_cursors, disallow_open_cursors=self.disallow_open_cursors)
+
def dispose(self):
+ """dispose of this pool.
+
+ this method leaves the possibility of checked-out connections remaining open,
+ so it is advised not to reuse the pool once dispose() is called, and to instead
+ use a new pool constructed by the recreate() method.
+ """
for key, conn in self._conns.items():
try:
conn.close()
self._max_overflow = max_overflow
self._timeout = timeout
+ def recreate(self):
+ self.log("Pool recreating")
+ return QueuePool(self._creator, pool_size=self._pool.maxsize, max_overflow=self._max_overflow, timeout=self._timeout, recycle=self._recycle, echo=self.echo, use_threadlocal=self._use_threadlocal, auto_close_cursors=self.auto_close_cursors, disallow_open_cursors=self.disallow_open_cursors)
+
def do_return_conn(self, conn):
try:
self._pool.put(conn, False)
c1 = p.connect()
assert c1.connection.id != c_id
+ def test_recreate(self):
+ dbapi = MockDBAPI()
+ p = pool.QueuePool(creator = lambda: dbapi.connect('foo.db'), pool_size = 1, max_overflow = 0, use_threadlocal = False)
+ p2 = p.recreate()
+ assert p2.size() == 1
+ assert p2._use_threadlocal is False
+ assert p2._max_overflow == 0
+
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator = lambda: dbapi.connect('foo.db'), pool_size = 1, max_overflow = 0, use_threadlocal = False)