- MetaData and all SchemaItems are safe to use with pickle. slow
table reflections can be dumped into a pickled file to be reused later.
Just reconnect the engine to the metadata after unpickling. [ticket:619]
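+   a sketch of the intended round trip (the table and URL are made up,
+   and metadata.connect() as the rebinding call is an assumption about
+   the 0.3-era API):
+
+       import pickle
+       from sqlalchemy import *
+
+       meta = BoundMetaData('sqlite://')
+       stuff = Table('stuff', meta, Column('id', Integer, primary_key=True))
+
+       pickled = pickle.dumps(meta)    # safe as of this change
+       meta2 = pickle.loads(pickled)
+       meta2.connect('sqlite://')      # assumed rebinding call; reattach an engine here
+       assert 'stuff' in meta2.tables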
+ - added a mutex to QueuePool's "overflow" calculation to prevent a race
+ condition that can bypass max_overflow
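+   the race is the classic check-then-act pattern: without a lock, two
+   threads can both observe _overflow < max_overflow before either
+   increments the counter, pushing the pool past its limit. a minimal
+   standalone sketch of the pattern and the fix (the class and method
+   names are made up, not QueuePool's actual API):
+
+       import threading
+
+       class OverflowCounter(object):
+           # hypothetical stand-in for the pool's overflow bookkeeping
+           def __init__(self, limit):
+               self.limit = limit
+               self.value = 0
+               self.lock = threading.Lock()
+
+           def incr_racy(self):
+               # two threads can both pass this check before either
+               # increments, overshooting self.limit
+               if self.value < self.limit:
+                   self.value += 1
+                   return True
+               return False
+
+           def incr_locked(self):
+               # holding the mutex makes the check and the increment atomic
+               self.lock.acquire()
+               try:
+                   if self.value < self.limit:
+                       self.value += 1
+                       return True
+                   return False
+               finally:
+                   self.lock.release()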
- fixed grouping of compound selects to give correct results. will break
on sqlite in some cases, but those cases were producing incorrect
results anyway; sqlite doesn't support grouped compound selects
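+   for example, nesting one union inside another now renders the inner
+   compound inside parentheses. a sketch (the tables are made up and
+   the emitted SQL is approximate), showing the parenthesized form
+   that sqlite rejects:
+
+       from sqlalchemy import *
+
+       meta = BoundMetaData('sqlite://')
+       t1 = Table('t1', meta, Column('x', Integer))
+       t2 = Table('t2', meta, Column('x', Integer))
+       t3 = Table('t3', meta, Column('x', Integer))
+
+       inner = union(t1.select(), t2.select())
+       outer = union(inner, t3.select())
+       # emits roughly:
+       # (SELECT t1.x FROM t1 UNION SELECT t2.x FROM t2)
+       # UNION SELECT t3.x FROM t3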
fields that are all persisted in a normalized fashion."""
from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.orm.collections import mapped_collection
import datetime
e = BoundMetaData('sqlite://', echo=True)
e.create_all()
-class EntityDict(dict):
- """this is a dictionary that implements an append() and an __iter__ method.
- such a dictionary can be used with SQLAlchemy list-based attributes."""
- def append(self, entityvalue):
- self[entityvalue.field.name] = entityvalue
- def __iter__(self):
- return iter(self.values())
-
class Entity(object):
"""represents an Entity. The __getattr__ method is overridden to search the
object's _entities dictionary for the appropriate value, and the __setattr__
)
mapper(Entity, entities, properties = {
- '_entities' : relation(EntityValue, lazy=False, cascade='save-update', collection_class=EntityDict)
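+ # mapped_collection keys each EntityValue by its field's name,
+ # replacing the removed EntityDict class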
+ '_entities' : relation(EntityValue, lazy=False, cascade='all', collection_class=mapped_collection(lambda entityvalue: entityvalue.field.name))
})
# create two entities. the objects can be used about as regularly as
entities = session.query(Entity).select()
for entity in entities:
print entity.title, entity.name, entity.price, entity.data
+
+for entity in entities:
+ session.delete(entity)
+session.flush()
\ No newline at end of file
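+ # _overflow starts at -pool_size: each connection created increments it,
+ # so overflow() reports connections beyond the fixed pool size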
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
- self._overflow_lock = max_overflow > 0 and threading.Lock() or None
+ self._overflow_lock = threading.Lock()
def recreate(self):
self.log("Pool recreating")
try:
self._pool.put(conn, False)
except Queue.Full:
- if not self._overflow_lock:
- self._overflow -= 1
- else:
- self._overflow_lock.acquire()
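+ # decrement under the mutex even when max_overflow is disabled, keeping
+ # the counter consistent with the locked check in do_get()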
+ self._overflow_lock.acquire()
+ try:
self._overflow -= 1
+ finally:
self._overflow_lock.release()
def do_get(self):
try:
return self._pool.get(self._max_overflow > -1 and self._overflow >= self._max_overflow, self._timeout)
except Queue.Empty:
- if self._overflow_lock:
- self._overflow_lock.acquire()
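+ # the mutex is taken unconditionally now: the overflow check and the
+ # increment below must be atomic, or max_overflow can be bypassed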
+ self._overflow_lock.acquire()
try:
if self._max_overflow > -1 and self._overflow >= self._max_overflow:
raise exceptions.TimeoutError("QueuePool limit of size %d overflow %d reached, connection timed out" % (self.size(), self.overflow()))
con = self.create_connection()
self._overflow += 1
finally:
- if self._overflow_lock:
- self._overflow_lock.release()
+ self._overflow_lock.release()
return con
def dispose(self):
import testbase
from testbase import PersistTest
import unittest, sys, os, time
-import threading
+import threading, thread
import sqlalchemy.pool as pool
import sqlalchemy.exceptions as exceptions
assert int(time.time() - now) == 2
def _test_overflow(self, thread_count, max_overflow):
+    # i can't really get this to fail on OSX. linux? windows?
p = pool.QueuePool(creator=lambda: mock_dbapi.connect('foo.db'),
pool_size=3, timeout=2,
max_overflow=max_overflow)
try:
con = p.connect()
peaks.append(p.overflow())
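+ # hold the connection briefly so the worker threads overlap and
+ # contend for overflow slots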
+ time.sleep(.005)
con.close()
del con
except exceptions.TimeoutError:
self.assert_(max(peaks) <= max_overflow)
def test_no_overflow(self):
- self._test_overflow(20, 0)
+ self._test_overflow(40, 0)
def test_max_overflow(self):
- self._test_overflow(20, 5)
+ self._test_overflow(40, 5)
def test_mixed_close(self):
p = pool.QueuePool(creator = lambda: mock_dbapi.connect('foo.db'), pool_size = 3, max_overflow = -1, use_threadlocal = True)