- adjustments to pool locking test so that it can fail on OSX
author    Mike Bayer <mike_mp@zzzcomputing.com>
          Sat, 30 Jun 2007 16:35:54 +0000 (16:35 +0000)
committer Mike Bayer <mike_mp@zzzcomputing.com>
          Sat, 30 Jun 2007 16:35:54 +0000 (16:35 +0000)
- restored conditional locking to the pool, used whenever max_overflow > -1

lib/sqlalchemy/orm/__init__.py
lib/sqlalchemy/pool.py
test/engine/pool.py

diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py
index 36830e8619821f79543f820853a77bde9cc107ea..7ef2da897d479775b7d33b9446c3345c7422468d 100644
--- a/lib/sqlalchemy/orm/__init__.py
+++ b/lib/sqlalchemy/orm/__init__.py
@@ -124,6 +124,8 @@ def clear_mappers():
             if hasattr(mapper.class_, 'c'):
                 del mapper.class_.c
         mapper_registry.clear()
+        # TODO: either dont use ArgSingleton, or
+        # find a way to clear only ClassKey instances from it
         sautil.ArgSingleton.instances.clear()
     finally:
         mapperlib._COMPILE_MUTEX.release()
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index 0377708e9322d851a8244775a58aaa5d91f3610a..c3a317c3f3fba845f223602a3ff3158b6c8f6bd6 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -470,7 +470,7 @@ class QueuePool(Pool):
         self._overflow = 0 - pool_size
         self._max_overflow = max_overflow
         self._timeout = timeout
-        self._overflow_lock = threading.Lock()
+        self._overflow_lock = self._max_overflow > -1 and threading.Lock() or None
 
     def recreate(self):
         self.log("Pool recreating")
@@ -480,24 +480,29 @@ class QueuePool(Pool):
         try:
             self._pool.put(conn, False)
         except Queue.Full:
-            self._overflow_lock.acquire()
-            try:
+            if self._overflow_lock is None:
                 self._overflow -= 1
-            finally:
-                self._overflow_lock.release()
+            else:
+                self._overflow_lock.acquire()
+                try:
+                    self._overflow -= 1
+                finally:
+                    self._overflow_lock.release()
 
     def do_get(self):
         try:
             return self._pool.get(self._max_overflow > -1 and self._overflow >= self._max_overflow, self._timeout)
         except Queue.Empty:
-            self._overflow_lock.acquire()
+            if self._overflow_lock is not None:
+                self._overflow_lock.acquire()
             try:
                 if self._max_overflow > -1 and self._overflow >= self._max_overflow:
                     raise exceptions.TimeoutError("QueuePool limit of size %d overflow %d reached, connection timed out" % (self.size(), self.overflow()))
                 con = self.create_connection()
                 self._overflow += 1
             finally:
-                self._overflow_lock.release()
+                if self._overflow_lock is not None:
+                    self._overflow_lock.release()
             return con
 
     def dispose(self):
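
The net effect of the pool.py hunk: when max_overflow is -1 (unlimited overflow), no lock is allocated at all and the overflow counter is updated without synchronization; otherwise every increment and decrement is bracketed by acquire/release. The constructor line uses the pre-Python-2.5 `and ... or ...` ternary idiom. A minimal standalone sketch of the same pattern follows; the class and method names are illustrative, not the SQLAlchemy source, and a modern conditional expression stands in for the `and/or` idiom.

    # Illustrative sketch of the conditional-locking pattern restored above.
    import threading

    class OverflowCounter(object):
        """Tracks pool overflow; synchronizes updates only when a limit applies."""

        def __init__(self, pool_size, max_overflow):
            self._overflow = 0 - pool_size
            self._max_overflow = max_overflow
            # modern spelling of the idiom used in the diff:
            #   self._max_overflow > -1 and threading.Lock() or None
            self._lock = threading.Lock() if max_overflow > -1 else None

        def try_increment(self):
            """Return True if a new overflow connection may be created."""
            if self._lock is None:
                # unlimited overflow: the exact count is advisory only
                self._overflow += 1
                return True
            self._lock.acquire()
            try:
                if self._overflow >= self._max_overflow:
                    return False   # limit reached; caller would raise TimeoutError
                self._overflow += 1
                return True
            finally:
                self._lock.release()

        def decrement(self):
            """Called when a connection cannot be returned to the queue."""
            if self._lock is None:
                self._overflow -= 1
            else:
                self._lock.acquire()
                try:
                    self._overflow -= 1
                finally:
                    self._lock.release()

Skipping the lock entirely in the unlimited case avoids taking a mutex on every checkin/checkout when the count never needs to be enforced.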
diff --git a/test/engine/pool.py b/test/engine/pool.py
index 17a0b369a89188d1e0081db959dfc108b6200248..924dddeac9c107bf6265fd3b1838b7eab55c9365 100644
--- a/test/engine/pool.py
+++ b/test/engine/pool.py
@@ -129,8 +129,11 @@ class PoolTest(PersistTest):
             assert int(time.time() - now) == 2
 
     def _test_overflow(self, thread_count, max_overflow):
-        # i cant really get this to fail on OSX.  linux? windows ?
-        p = pool.QueuePool(creator=lambda: mock_dbapi.connect('foo.db'),
+        def creator():
+            time.sleep(.05)
+            return mock_dbapi.connect('foo.db')
+            
+        p = pool.QueuePool(creator=creator,
                            pool_size=3, timeout=2,
                            max_overflow=max_overflow)
         peaks = []
@@ -138,8 +141,8 @@ class PoolTest(PersistTest):
             for i in range(10):
                 try:
                     con = p.connect()
-                    peaks.append(p.overflow())
                     time.sleep(.005)
+                    peaks.append(p.overflow())
                     con.close()
                     del con
                 except exceptions.TimeoutError:
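
The test hunks widen the race window: the connection creator now sleeps 50 ms, so concurrent do_get() calls overlap inside create_connection(), and overflow() is sampled after the 5 ms hold rather than immediately at checkout, so the peaks list records overflow while connections are actually outstanding. A rough sketch of how such a harness can drive the pool and check the recorded peaks is below; the pool object `p`, the exception handling, and the closing assertion are illustrative, not the exact SQLAlchemy test.

    # Illustrative overflow stress harness in the spirit of _test_overflow.
    import threading, time

    def run_overflow_check(p, thread_count, max_overflow):
        peaks = []

        def whammy():
            for i in range(10):
                try:
                    con = p.connect()
                    time.sleep(.005)
                    peaks.append(p.overflow())   # sample while still checked out
                    con.close()
                    del con
                except Exception:
                    # under contention a checkout may time out; that is expected
                    pass

        threads = [threading.Thread(target=whammy) for i in range(thread_count)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # with the conditional lock in place, overflow never exceeds the limit
        if max_overflow > -1:
            assert max(peaks) <= max_overflow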