"""
- # assertions to ensure this method isn't being
- # called unnecessarily. can comment these out when
- # code is stable
- assert not self.post_update or not self._check_reverse(uow)
-
-
# locate and disable the aggregate processors
# for this dependency
class ManyToManyDP(DependencyProcessor):
- def per_property_preprocessors(self, uow):
- if self._check_reverse(uow):
- return
- DependencyProcessor.per_property_preprocessors(self, uow)
-
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
                                  child_deletes,
                                  after_save,
                                  before_delete):
uowcommit.register_object(
attributes.instance_state(c), isdelete=True)
-
+
+ def _get_reversed_processed_set(self, uow):
+ if not self.prop._reverse_property:
+ return None
+
+ process_key = tuple(sorted(
+ [self.key] +
+ [p.key for p in self.prop._reverse_property]
+ ))
+ return uow.memo(
+ ('reverse_key', process_key),
+ set
+ )
+
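(Illustration only, not part of the patch: a minimal standalone sketch of
the uow.memo() pattern assumed above.  Both directions of a bidirectional
relationship sort their keys into the same tuple, so both receive the very
same set and can skip pairs the other side already handled.  FakeUOW is a
hypothetical stand-in for the unit of work.)

    class FakeUOW(object):
        def __init__(self):
            self.attributes = {}

        def memo(self, key, callable_):
            # create the memoized value on first access, then reuse it
            if key not in self.attributes:
                self.attributes[key] = callable_()
            return self.attributes[key]

    uow = FakeUOW()
    forward = uow.memo(('reverse_key', ('addresses', 'user')), set)
    reverse = uow.memo(('reverse_key', ('addresses', 'user')), set)
    assert forward is reverse  # one shared "processed" set for both sides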
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
+
+ processed = self._get_reversed_processed_set(uowcommit)
+
for state in states:
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                passive=self.passive_deletes)
if history:
for child in history.non_added():
- if child is None:
+ if child is None or \
+ (processed is not None and (state, child) in processed) or \
+ not uowcommit.session._contains_state(child):
continue
associationrow = {}
                    self._synchronize(
                        state,
                        child,
                        associationrow,
                        False, uowcommit)
secondary_delete.append(associationrow)
-
+
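+            # note: pairs are recorded as (child, state), i.e. reversed from
+            # the (state, child) membership test above.  when the reverse
+            # property processes the same association row, its "state" is
+            # this child and its "child" is this state, so the reversed
+            # tuple is exactly what it will look up.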
+ if processed is not None:
+ processed.update((c, state) for c in history.non_added())
+
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def process_saves(self, uowcommit, states):
    secondary_delete = []
    secondary_insert = []
    secondary_update = []
+ processed = self._get_reversed_processed_set(uowcommit)
+
for state in states:
history = uowcommit.get_attribute_history(state, self.key)
if history:
for child in history.added:
- if child is None:
+ if child is None or \
+ (processed is not None and (state, child) in processed):
continue
associationrow = {}
                    self._synchronize(state,
                        child,
                        associationrow,
                        False, uowcommit)
secondary_insert.append(associationrow)
for child in history.deleted:
- if child is None:
+ if child is None or \
+ (processed is not None and (state, child) in processed) or \
+ not uowcommit.session._contains_state(child):
continue
associationrow = {}
                    self._synchronize(state,
                        child,
                        associationrow,
                        False, uowcommit)
secondary_delete.append(associationrow)
-
+
+ if processed is not None:
+ processed.update((c, state) for c in history.added + history.deleted)
+
        if not self.passive_updates and \
                self._pks_changed(uowcommit, state):
            if not history:
                history = uowcommit.get_attribute_history(
                    state, self.key, passive=False)
            for child in history.unchanged:
                associationrow = {}
                sync.update(state, self.parent, associationrow,
                            "old_", self.prop.synchronize_pairs)
                sync.update(child, self.mapper, associationrow,
                            "old_", self.prop.secondary_synchronize_pairs)
                secondary_update.append(associationrow)
+
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)

def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
-
+
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
underlying :func:`select()` function.
"""
+ global sql_util
+ if not sql_util:
+ from sqlalchemy.sql import util as sql_util
if fold_equivalents:
collist = sql_util.folded_equivalents(self)
else:
self._result = equivalent
if not self._result:
self._errmsg = "Testing for compiled statement %r partial params %r, " \
- "received %r with params %r" % (self.statement, all_params, _received_statement, all_received)
+ "received %r with params %r" % \
+ (self.statement, all_params, _received_statement, all_received)
class CountStatements(AssertRule):
import optparse, os, sys, re, ConfigParser, time, warnings
-
# 2to3
import StringIO
def _reverse_topological(options, file_config):
if options.reversetop:
- from sqlalchemy.orm import unitofwork
+ from sqlalchemy.orm import unitofwork, session, mapper, dependency
from sqlalchemy import topological
- class RevQueueDepSort(topological.QueueDependencySorter):
- def __init__(self, tuples, allitems):
- self.tuples = list(tuples)
- self.allitems = list(allitems)
- self.tuples.reverse()
- self.allitems.reverse()
- topological.QueueDependencySorter = RevQueueDepSort
- unitofwork.DependencySorter = RevQueueDepSort
+ from sqlalchemy.test.util import RandomSet
+ topological.set = unitofwork.set = session.set = mapper.set = dependency.set = RandomSet
post_configure['topological'] = _reverse_topological
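(Illustration only, not part of the patch: a plausible sketch of a
random-ordering set such as the sqlalchemy.test.util.RandomSet referenced
above.  Shuffling the iteration order on every pass makes any unit-of-work
code that silently relies on set ordering fail nondeterministically rather
than pass by luck.)

    import random

    class RandomSet(set):
        def __iter__(self):
            # materialize and shuffle the elements on each iteration
            items = list(set.__iter__(self))
            random.shuffle(items)
            return iter(items)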
callback=_engine_strategy,
help="Engine strategy (plain or threadlocal, defaults to plain)")
opt("--reversetop", action="store_true", dest="reversetop", default=False,
- help="Reverse the collection ordering for topological sorts (helps "
+ help="Use a random-ordering set implementation in the ORM (helps "
"reveal dependency issues)")
opt("--unhashable", action="store_true", dest="unhashable", default=False,
help="Disallow SQLAlchemy from performing a hash() on mapped test objects.")
from sqlalchemy.exc import CircularDependencyError
from sqlalchemy import util
-# this enables random orderings for iterated subsets
-# of non-dependent items.
-#from sqlalchemy.test.util import RandomSet as set
-
__all__ = ['sort', 'sort_as_subsets', 'find_cycles']
def sort_as_subsets(tuples, allitems):
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.test import testing
from sqlalchemy.test.schema import Table, Column
-from sqlalchemy import Integer, String, ForeignKey
+from sqlalchemy import Integer, String, ForeignKey, func
from test.orm import _fixtures, _base
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, unitofwork, attributes
sess.add_all([n1, n2, n3, n4, n5])
- self.assert_sql_execution(
- testing.db,
- sess.flush,
-
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- {'data': 'n2', 'favorite_node_id': None}
- ),
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- {'data': 'n3', 'favorite_node_id': None}),
- CompiledSQL("INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- lambda ctx:{'data': 'n5', 'favorite_node_id': n2.id}),
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- lambda ctx:{'data': 'n4', 'favorite_node_id': n3.id}),
- CompiledSQL(
- "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
- "VALUES (:left_node_id, :right_node_id)",
- lambda ctx:[
- {'right_node_id': n5.id, 'left_node_id': n3.id},
- {'right_node_id': n4.id, 'left_node_id': n3.id},
- {'right_node_id': n3.id, 'left_node_id': n2.id},
- {'right_node_id': n5.id, 'left_node_id': n2.id}
- ]
- ),
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- lambda ctx:[{'data': 'n1', 'favorite_node_id': n5.id}]
- ),
- CompiledSQL(
- "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
- "VALUES (:left_node_id, :right_node_id)",
- lambda ctx:[
- {'right_node_id': n2.id, 'left_node_id': n1.id},
- {'right_node_id': n3.id, 'left_node_id': n1.id},
- {'right_node_id': n4.id, 'left_node_id': n1.id}
- ])
- )
+        # can't really assert the SQL on this easily, since there are
+        # too many valid orderings in which the rows can be inserted.
+        # so check the end result instead
+ sess.flush()
+ eq_(
+ sess.query(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\
+ order_by(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\
+ all(),
+ sorted([
+ (n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id),
+ (n2.id, n3.id), (n2.id, n5.id),
+ (n3.id, n5.id), (n3.id, n4.id)
+ ])
+ )
sess.delete(n1)
for n in [n2, n3, n4, n5]:
sess.delete(n)
-
+
# load these collections
# outside of the flush() below
n4.children
SA_Metadata.create_all(engine)
-@profiling.profiled('large_flush', always=True, sort=['file'])
+@profiling.profiled('large_flush', always=True, sort=['cumulative'])
def generate_error():
q = Q()
    for j in range(100):  # at 306 the error does not appear (depending on recursion depth)