]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
- merged some tweaks to the pool overflow sync from trunk r2819
authorMike Bayer <mike_mp@zzzcomputing.com>
Sat, 30 Jun 2007 01:31:56 +0000 (01:31 +0000)
committerMike Bayer <mike_mp@zzzcomputing.com>
Sat, 30 Jun 2007 01:31:56 +0000 (01:31 +0000)
- merged vertical.py delete tests from trunk r2820; converted the collection
to a new-style collection

CHANGES
examples/vertical/vertical.py
lib/sqlalchemy/pool.py
test/engine/pool.py

diff --git a/CHANGES b/CHANGES
index b40adbe94b75700e2ff0a3c7fd08a0d286f391ff..80311f746682dcb2f5174885851968e65832f637 100644 (file)
--- a/CHANGES
+++ b/CHANGES
     - MetaData and all SchemaItems are safe to use with pickle.  slow
       table reflections can be dumped into a pickled file to be reused later.
       Just reconnect the engine to the metadata after unpickling. [ticket:619]
+    - added a mutex to QueuePool's "overflow" calculation to prevent a race
+      condition that can bypass max_overflow
     - fixed grouping of compound selects to give correct results. will break
       on sqlite in some cases, but those cases were producing incorrect
       results anyway, sqlite doesn't support grouped compound selects
index a6ee1742895879eb251b0364fff31b8a911d6716..f470fbd191e4cc8103c21bbbf736028a827125d2 100644 (file)
@@ -3,6 +3,8 @@ represented in distinct database rows.  This allows objects to be created with d
 fields that are all persisted in a normalized fashion."""
 
 from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.orm.collections import mapped_collection
 import datetime
 
 e = BoundMetaData('sqlite://', echo=True)
@@ -37,14 +39,6 @@ entity_values = Table('entity_values', e,
 
 e.create_all()
 
-class EntityDict(dict):
-    """this is a dictionary that implements an append() and an __iter__ method.
-    such a dictionary can be used with SQLAlchemy list-based attributes."""
-    def append(self, entityvalue):
-        self[entityvalue.field.name] = entityvalue
-    def __iter__(self):
-        return iter(self.values())
-    
 class Entity(object):
     """represents an Entity.  The __getattr__ method is overridden to search the
     object's _entities dictionary for the appropriate value, and the __setattribute__
@@ -123,7 +117,7 @@ mapper(
 )
 
 mapper(Entity, entities, properties = {
-    '_entities' : relation(EntityValue, lazy=False, cascade='save-update', collection_class=EntityDict)
+    '_entities' : relation(EntityValue, lazy=False, cascade='all', collection_class=mapped_collection(lambda entityvalue: entityvalue.field.name))
 })
 
 # create two entities.  the objects can be used about as regularly as
@@ -174,3 +168,7 @@ session.clear()
 entities = session.query(Entity).select()
 for entity in entities:
     print entity.title, entity.name, entity.price, entity.data
+
+for entity in entities:
+    session.delete(entity)
+session.flush()
\ No newline at end of file
index 1ae787c7c2a94e4cfb0a108c38fc586bb3e76283..0377708e9322d851a8244775a58aaa5d91f3610a 100644 (file)
@@ -470,7 +470,7 @@ class QueuePool(Pool):
         self._overflow = 0 - pool_size
         self._max_overflow = max_overflow
         self._timeout = timeout
-        self._overflow_lock = max_overflow > 0 and threading.Lock() or None
+        self._overflow_lock = threading.Lock()
 
     def recreate(self):
         self.log("Pool recreating")
@@ -480,27 +480,24 @@ class QueuePool(Pool):
         try:
             self._pool.put(conn, False)
         except Queue.Full:
-            if not self._overflow_lock:
-                self._overflow -= 1
-            else:
-                self._overflow_lock.acquire()
+            self._overflow_lock.acquire()
+            try:
                 self._overflow -= 1
+            finally:
                 self._overflow_lock.release()
 
     def do_get(self):
         try:
             return self._pool.get(self._max_overflow > -1 and self._overflow >= self._max_overflow, self._timeout)
         except Queue.Empty:
-            if self._overflow_lock:
-                self._overflow_lock.acquire()
+            self._overflow_lock.acquire()
             try:
                 if self._max_overflow > -1 and self._overflow >= self._max_overflow:
                     raise exceptions.TimeoutError("QueuePool limit of size %d overflow %d reached, connection timed out" % (self.size(), self.overflow()))
                 con = self.create_connection()
                 self._overflow += 1
             finally:
-                if self._overflow_lock:
-                    self._overflow_lock.release()
+                self._overflow_lock.release()
             return con
 
     def dispose(self):
index 7f881fb5564c98e716cb2033c9c10f91ac5f3838..17a0b369a89188d1e0081db959dfc108b6200248 100644 (file)
@@ -1,7 +1,7 @@
 import testbase
 from testbase import PersistTest
 import unittest, sys, os, time
-import threading
+import threading, thread
 
 import sqlalchemy.pool as pool
 import sqlalchemy.exceptions as exceptions
@@ -129,6 +129,7 @@ class PoolTest(PersistTest):
             assert int(time.time() - now) == 2
 
     def _test_overflow(self, thread_count, max_overflow):
+        # I can't really get this to fail on OS X.  Linux?  Windows?
         p = pool.QueuePool(creator=lambda: mock_dbapi.connect('foo.db'),
                            pool_size=3, timeout=2,
                            max_overflow=max_overflow)
@@ -138,6 +139,7 @@ class PoolTest(PersistTest):
                 try:
                     con = p.connect()
                     peaks.append(p.overflow())
+                    time.sleep(.005)
                     con.close()
                     del con
                 except exceptions.TimeoutError:
@@ -153,10 +155,10 @@ class PoolTest(PersistTest):
         self.assert_(max(peaks) <= max_overflow)
 
     def test_no_overflow(self):
-        self._test_overflow(20, 0)
+        self._test_overflow(40, 0)
 
     def test_max_overflow(self):
-        self._test_overflow(20, 5)
+        self._test_overflow(40, 5)
         
     def test_mixed_close(self):
         p = pool.QueuePool(creator = lambda: mock_dbapi.connect('foo.db'), pool_size = 3, max_overflow = -1, use_threadlocal = True)