]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
- beef up the --reversetop test option to embed RandomSet throughout the ORM
authorMike Bayer <mike_mp@zzzcomputing.com>
Thu, 15 Apr 2010 04:13:48 +0000 (00:13 -0400)
committerMike Bayer <mike_mp@zzzcomputing.com>
Thu, 15 Apr 2010 04:13:48 +0000 (00:13 -0400)
- with m2m we have to go back to the previous approach of having both sides of
the DP fire off, tracking each pair of objects, since history may not be consistently
present on one side or the other
- this revealed a whole lot of issues with self-referential m2m, which are fixed

lib/sqlalchemy/orm/dependency.py
lib/sqlalchemy/sql/expression.py
lib/sqlalchemy/test/assertsql.py
lib/sqlalchemy/test/config.py
lib/sqlalchemy/test/noseplugin.py
lib/sqlalchemy/topological.py
test/orm/test_unitofworkv2.py
test/perf/large_flush.py

index 624035c686891c51656029fc1dd5c7cf21ade212..9f1b78f4ad740838fbfe30773b2c2fc2f6a65e6a 100644 (file)
@@ -101,12 +101,6 @@ class DependencyProcessor(object):
         
         """
 
-        # assertions to ensure this method isn't being
-        # called unnecessarily.  can comment these out when 
-        # code is stable
-        assert not self.post_update or not self._check_reverse(uow)
-        
-
         # locate and disable the aggregate processors
         # for this dependency
         
@@ -776,11 +770,6 @@ class DetectKeySwitch(DependencyProcessor):
 
 class ManyToManyDP(DependencyProcessor):
         
-    def per_property_preprocessors(self, uow):
-        if self._check_reverse(uow):
-            return
-        DependencyProcessor.per_property_preprocessors(self, uow)
-
     def per_property_dependencies(self, uow, parent_saves, 
                                                 child_saves, 
                                                 parent_deletes, 
@@ -860,11 +849,27 @@ class ManyToManyDP(DependencyProcessor):
                                                     child):
                             uowcommit.register_object(
                                 attributes.instance_state(c), isdelete=True)
-        
+    
+    def _get_reversed_processed_set(self, uow):
+        if not self.prop._reverse_property:
+            return None
+
+        process_key = tuple(sorted(
+                        [self.key] + 
+                        [p.key for p in self.prop._reverse_property]
+                    ))
+        return uow.memo(
+                            ('reverse_key', process_key), 
+                            set
+                        )
+
     def process_deletes(self, uowcommit, states):
         secondary_delete = []
         secondary_insert = []
         secondary_update = []
+        
+        processed = self._get_reversed_processed_set(uowcommit)
+        
         for state in states:
             history = uowcommit.get_attribute_history(
                                     state, 
@@ -872,7 +877,9 @@ class ManyToManyDP(DependencyProcessor):
                                     passive=self.passive_deletes)
             if history:
                 for child in history.non_added():
-                    if child is None:
+                    if child is None or \
+                        (processed is not None and (state, child) in processed) or \
+                        not uowcommit.session._contains_state(child):
                         continue
                     associationrow = {}
                     self._synchronize(
@@ -881,7 +888,10 @@ class ManyToManyDP(DependencyProcessor):
                                         associationrow, 
                                         False, uowcommit)
                     secondary_delete.append(associationrow)
-
+                
+                if processed is not None:
+                    processed.update((c, state) for c in history.non_added())
+                
         self._run_crud(uowcommit, secondary_insert, 
                         secondary_update, secondary_delete)
 
@@ -890,11 +900,14 @@ class ManyToManyDP(DependencyProcessor):
         secondary_insert = []
         secondary_update = []
 
+        processed = self._get_reversed_processed_set(uowcommit)
+        
         for state in states:
             history = uowcommit.get_attribute_history(state, self.key)
             if history:
                 for child in history.added:
-                    if child is None:
+                    if child is None or \
+                            (processed is not None and (state, child) in processed):
                         continue
                     associationrow = {}
                     self._synchronize(state, 
@@ -903,7 +916,9 @@ class ManyToManyDP(DependencyProcessor):
                                         False, uowcommit)
                     secondary_insert.append(associationrow)
                 for child in history.deleted:
-                    if child is None:
+                    if child is None or \
+                            (processed is not None and (state, child) in processed) or \
+                            not uowcommit.session._contains_state(child):
                         continue
                     associationrow = {}
                     self._synchronize(state, 
@@ -911,7 +926,10 @@ class ManyToManyDP(DependencyProcessor):
                                         associationrow, 
                                         False, uowcommit)
                     secondary_delete.append(associationrow)
-
+                
+                if processed is not None:
+                    processed.update((c, state) for c in history.added + history.deleted)
+                
             if not self.passive_updates and \
                     self._pks_changed(uowcommit, state):
                 if not history:
@@ -935,13 +953,14 @@ class ManyToManyDP(DependencyProcessor):
 
                     secondary_update.append(associationrow)
                     
+
         self._run_crud(uowcommit, secondary_insert, 
                         secondary_update, secondary_delete)
         
     def _run_crud(self, uowcommit, secondary_insert, 
                                         secondary_update, secondary_delete):
         connection = uowcommit.transaction.connection(self.mapper)
-
+        
         if secondary_delete:
             associationrow = secondary_delete[0]
             statement = self.secondary.delete(sql.and_(*[
index fc6b5ad97035b1bb7db242009df1003690d2f5ad..70e26cfccd630a053ff9b9a2aedd6dc142c41900 100644 (file)
@@ -2898,6 +2898,9 @@ class Join(FromClause):
           underlying :func:`select()` function.
 
         """
+        global sql_util
+        if not sql_util:
+            from sqlalchemy.sql import util as sql_util
         if fold_equivalents:
             collist = sql_util.folded_equivalents(self)
         else:
index d67de235524cc5106f0a9448144a6dbe77f1e9e4..81ef73a7cc308087b1850d03f0d5e07d9df3781e 100644 (file)
@@ -173,7 +173,8 @@ class CompiledSQL(SQLMatchRule):
         self._result = equivalent
         if not self._result:
             self._errmsg = "Testing for compiled statement %r partial params %r, " \
-                    "received %r with params %r" % (self.statement, all_params, _received_statement, all_received)
+                    "received %r with params %r" % \
+                    (self.statement, all_params, _received_statement, all_received)
     
         
 class CountStatements(AssertRule):
index efbe00fef2a1d0084505c43befd7aacdc652668f..7d528a04b048265178ce1dead9e2bed2e1f9ca3d 100644 (file)
@@ -1,6 +1,5 @@
 import optparse, os, sys, re, ConfigParser, time, warnings
 
-
 # 2to3
 import StringIO
 
@@ -166,15 +165,9 @@ post_configure['table_options'] = _set_table_options
 
 def _reverse_topological(options, file_config):
     if options.reversetop:
-        from sqlalchemy.orm import unitofwork
+        from sqlalchemy.orm import unitofwork, session, mapper, dependency
         from sqlalchemy import topological
-        class RevQueueDepSort(topological.QueueDependencySorter):
-            def __init__(self, tuples, allitems):
-                self.tuples = list(tuples)
-                self.allitems = list(allitems)
-                self.tuples.reverse()
-                self.allitems.reverse()
-        topological.QueueDependencySorter = RevQueueDepSort
-        unitofwork.DependencySorter = RevQueueDepSort
+        from sqlalchemy.test.util import RandomSet
+        topological.set = unitofwork.set = session.set = mapper.set = dependency.set = RandomSet
 post_configure['topological'] = _reverse_topological
 
index 5e8e21e8fdcc4c1ce3b6d7a9a211324b156346be..6a3106e69ddd34593809adc957ff1a1a0c99bab7 100644 (file)
@@ -51,7 +51,7 @@ class NoseSQLAlchemy(Plugin):
             callback=_engine_strategy,
             help="Engine strategy (plain or threadlocal, defaults to plain)")
         opt("--reversetop", action="store_true", dest="reversetop", default=False,
-            help="Reverse the collection ordering for topological sorts (helps "
+            help="Use a random-ordering set implementation in the ORM (helps "
                   "reveal dependency issues)")
         opt("--unhashable", action="store_true", dest="unhashable", default=False,
             help="Disallow SQLAlchemy from performing a hash() on mapped test objects.")
index 2b6eadd5d42ffa9ef135ab5b98f6660c306f0e35..6c3e90d981ffd58a9c0c91398554c363062d14e1 100644 (file)
@@ -9,10 +9,6 @@
 from sqlalchemy.exc import CircularDependencyError
 from sqlalchemy import util
 
-# this enables random orderings for iterated subsets
-# of non-dependent items.
-#from sqlalchemy.test.util import RandomSet as set
-
 __all__ = ['sort', 'sort_as_subsets', 'find_cycles']
 
 def sort_as_subsets(tuples, allitems):
index e28537b003f68ebd578070ddcdb316f3d348c331..33e5f557e4f2e13d168fe4d8340c0b5a4f7af3d0 100644 (file)
@@ -1,7 +1,7 @@
 from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
 from sqlalchemy.test import testing
 from sqlalchemy.test.schema import Table, Column
-from sqlalchemy import Integer, String, ForeignKey
+from sqlalchemy import Integer, String, ForeignKey, func
 from test.orm import _fixtures, _base
 from sqlalchemy.orm import mapper, relationship, backref, \
                             create_session, unitofwork, attributes
@@ -585,50 +585,20 @@ class SingleCycleM2MTest(_base.MappedTest, testing.AssertsExecutionResults, Asse
         
         sess.add_all([n1, n2, n3, n4, n5])
         
-        self.assert_sql_execution(
-                testing.db,
-                sess.flush,
-
-                CompiledSQL(
-                    "INSERT INTO nodes (data, favorite_node_id) "
-                    "VALUES (:data, :favorite_node_id)",
-                    {'data': 'n2', 'favorite_node_id': None}
-                ),
-                CompiledSQL(
-                    "INSERT INTO nodes (data, favorite_node_id) "
-                    "VALUES (:data, :favorite_node_id)", 
-                    {'data': 'n3', 'favorite_node_id': None}),
-                CompiledSQL("INSERT INTO nodes (data, favorite_node_id) "
-                            "VALUES (:data, :favorite_node_id)", 
-                    lambda ctx:{'data': 'n5', 'favorite_node_id': n2.id}),
-                CompiledSQL(
-                    "INSERT INTO nodes (data, favorite_node_id) "
-                    "VALUES (:data, :favorite_node_id)", 
-                    lambda ctx:{'data': 'n4', 'favorite_node_id': n3.id}),
-                CompiledSQL(
-                    "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
-                    "VALUES (:left_node_id, :right_node_id)", 
-                    lambda ctx:[
-                        {'right_node_id': n5.id, 'left_node_id': n3.id}, 
-                        {'right_node_id': n4.id, 'left_node_id': n3.id}, 
-                        {'right_node_id': n3.id, 'left_node_id': n2.id}, 
-                        {'right_node_id': n5.id, 'left_node_id': n2.id}
-                    ]
-                    ),
-                CompiledSQL(
-                    "INSERT INTO nodes (data, favorite_node_id) "
-                    "VALUES (:data, :favorite_node_id)", 
-                    lambda ctx:[{'data': 'n1', 'favorite_node_id': n5.id}]
-                ),
-                CompiledSQL(
-                    "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
-                    "VALUES (:left_node_id, :right_node_id)", 
-                    lambda ctx:[
-                        {'right_node_id': n2.id, 'left_node_id': n1.id}, 
-                        {'right_node_id': n3.id, 'left_node_id': n1.id}, 
-                        {'right_node_id': n4.id, 'left_node_id': n1.id}
-                    ])
-            )
+        # can't really assert the SQL on this easily
+        # since there's too many ways to insert the rows.
+        # so check the end result
+        sess.flush()
+        eq_(
+            sess.query(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\
+                    order_by(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\
+                    all(), 
+            sorted([
+                    (n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id), 
+                    (n2.id, n3.id), (n2.id, n5.id), 
+                    (n3.id, n5.id), (n3.id, n4.id)
+                ])
+        )
 
         sess.delete(n1)
         
@@ -653,7 +623,7 @@ class SingleCycleM2MTest(_base.MappedTest, testing.AssertsExecutionResults, Asse
         
         for n in [n2, n3, n4, n5]:
             sess.delete(n)
-            
+        
         # load these collections
         # outside of the flush() below
         n4.children
index 431a289449456433f7b4c75264a97c3209f9aa32..5dd6f610fd4eb1f54cf89cd798edfd9cb0f82c81 100644 (file)
@@ -70,7 +70,7 @@ mapper(A, a_table, inherits=Object, polymorphic_identity='A',
 
 SA_Metadata.create_all(engine)
 
-@profiling.profiled('large_flush', always=True, sort=['file'])
+@profiling.profiled('large_flush', always=True, sort=['cumulative'])
 def generate_error():
     q = Q()
     for j in range(100): #at 306 the error does not pop out (depending on recursion depth)