git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
- added missing coverage for self-referential many-to-many flushes
author     Mike Bayer <mike_mp@zzzcomputing.com>
           Tue, 6 Apr 2010 22:28:40 +0000 (18:28 -0400)
committer  Mike Bayer <mike_mp@zzzcomputing.com>
           Tue, 6 Apr 2010 22:28:40 +0000 (18:28 -0400)
- some other areas where per-state deps are called and an empty result is
returned still lack coverage.
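
For context, the shape the new test exercises is a single mapped class related
to itself through an association table. Below is a minimal standalone sketch of
such a mapping; it mirrors the SingleCycleM2MTest fixture added in
test/orm/test_unitofworkv2.py further down, but is not the test code itself:

# sketch: self-referential many-to-many via an association table
from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
from sqlalchemy.orm import mapper, relationship

metadata = MetaData()

nodes = Table('nodes', metadata,
    Column('id', Integer, primary_key=True),
    Column('data', String(30)),
)

# both columns of the association table point back at nodes.id
node_to_nodes = Table('node_to_nodes', metadata,
    Column('left_node_id', Integer, ForeignKey('nodes.id'), primary_key=True),
    Column('right_node_id', Integer, ForeignKey('nodes.id'), primary_key=True),
)

class Node(object):
    pass

# primaryjoin/secondaryjoin disambiguate the two foreign keys
# that both reference the same table
mapper(Node, nodes, properties={
    'children': relationship(Node, secondary=node_to_nodes,
        primaryjoin=nodes.c.id == node_to_nodes.c.left_node_id,
        secondaryjoin=nodes.c.id == node_to_nodes.c.right_node_id,
        backref='parents'),
})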

lib/sqlalchemy/orm/dependency.py
lib/sqlalchemy/orm/unitofwork.py
lib/sqlalchemy/test/assertsql.py
lib/sqlalchemy/topological.py
test/orm/test_naturalpks.py
test/orm/test_unitofworkv2.py

index 1018c202955c73d128e11bd86c0c9c5ea6ba4a3f..d02776dcee6ebbac91ec7e1e7ab66b0c879be467 100644 (file)
@@ -110,7 +110,8 @@ class DependencyProcessor(object):
         """
 
         if self.post_update and self._check_reverse(uow):
-            return
+            # TODO: coverage here
+            return iter([])
 
         # locate and disable the aggregate processors
         # for this dependency
@@ -684,7 +685,8 @@ class DetectKeySwitch(DependencyProcessor):
             ])
 
     def per_state_flush_actions(self, uow, states, isdelete):
-        pass
+        # TODO: coverage here
+        return iter([])
         
     def presort_deletes(self, uowcommit, states):
         assert False
@@ -741,9 +743,9 @@ class ManyToManyDP(DependencyProcessor):
             
     def per_state_flush_actions(self, uow, states, isdelete):
         if self._check_reverse(uow):
-            return
+            return iter([])
         else:
-            DependencyProcessor.\
+            return DependencyProcessor.\
                     per_state_flush_actions(self, uow, states, isdelete)
             
     def per_property_dependencies(self, uow, parent_saves, 
@@ -777,15 +779,20 @@ class ManyToManyDP(DependencyProcessor):
                                     after_save, before_delete, 
                                     isdelete, childisdelete):
         if not isdelete:
-            uow.dependencies.update([
-                (save_parent, after_save),
-                (after_save, child_action),
-                (save_parent, child_action)
-            ])
+            if childisdelete:
+                uow.dependencies.update([
+                    (save_parent, after_save),
+                    (after_save, child_action),
+                ])
+            else:
+                uow.dependencies.update([
+                    (save_parent, after_save),
+                    (child_action, after_save),
+                ])
         else:
             uow.dependencies.update([
                 (before_delete, child_action),
-                (child_action, delete_parent)
+                (before_delete, delete_parent)
             ])
         
     def presort_deletes(self, uowcommit, states):
index ca8c31e8649d03132a706b829521a8a5994f0b1c..85ed790d9f89a864df4f6ce70f047f3e16da0515 100644 (file)
@@ -4,19 +4,11 @@
 # This module is part of SQLAlchemy and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
 
-"""The internals for the Unit Of Work system.
+"""The internals for the unit of work system.
 
-Includes hooks into the attributes package enabling the routing of
-change events to Unit Of Work objects, as well as the flush()
-mechanism which creates a dependency structure that executes change
-operations.
-
-A Unit of Work is essentially a system of maintaining a graph of
-in-memory objects and their modified state.  Objects are maintained as
-unique against their primary key identity using an *identity map*
-pattern.  The Unit of Work then maintains lists of objects that are
-new, dirty, or deleted and provides the capability to flush all those
-changes at once.
+The session's flush() process passes objects to a contextual object
+here, which assembles flush tasks based on mappers and their properties,
+organizes them in order of dependency, and executes them.
 
 """
 
@@ -79,11 +71,6 @@ class UOWEventHandler(interfaces.AttributeExtension):
 
 
 class UOWTransaction(object):
-    """Handles the details of organizing and executing transaction
-    tasks during a UnitOfWork object's flush() operation.
-
-    """
-
     def __init__(self, session):
         self.session = session
         self.mapper_flush_opts = session._mapper_flush_opts
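
A rough conceptual sketch of the flow the new docstring describes follows. The
names here (FlushAction, sort_actions, flush) are illustrative only and are not
the actual unitofwork.py API: the flush collects per-mapper/per-property
actions, records dependency edges between them, orders them topologically, and
runs them.

# illustrative sketch only, not the real UOWTransaction internals
class FlushAction(object):
    def __init__(self, name, fn):
        self.name = name
        self.fn = fn

def sort_actions(actions, dependencies):
    """Yield actions so that every (before, after) edge in
    'dependencies' is respected: a naive topological sort."""
    remaining = list(actions)
    done = set()
    while remaining:
        for action in remaining:
            preds = [a for (a, b) in dependencies if b is action]
            if all(p in done for p in preds):
                remaining.remove(action)
                done.add(action)
                yield action
                break
        else:
            raise AssertionError("circular dependency among flush actions")

def flush(actions, dependencies):
    # "organizes them in order of dependency, and executes"
    for action in sort_actions(actions, dependencies):
        action.fn()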
index 1417c2e4355fe95a6504bb41a7d02231380a3723..81a6191a181e0794b7d555d8872228f2172904ed 100644 (file)
@@ -156,12 +156,15 @@ class CompiledSQL(SQLMatchRule):
             if not isinstance(params, list):
                 params = [params]
             
-            # do a positive compare only
-            for param, received in zip(params, _received_parameters):
-                for k, v in param.iteritems():
-                    if k not in received or received[k] != v:
-                        equivalent = False
-                        break
+            while params:
+                param = params.pop(0)
+                if param not in _received_parameters:
+                    equivalent = False
+                    break
+                else:
+                    _received_parameters.remove(param)
+            if _received_parameters:
+                equivalent = False
         else:
             params = {}
 
index fbde7c6014cb8a9aa3eb407b5ef74ef4a77f6d54..a6328a5e4ae3e520c5f33f3700ffe9964c41cb25 100644 (file)
@@ -9,6 +9,9 @@
 from sqlalchemy.exc import CircularDependencyError
 from sqlalchemy import util
 
+# this enables random orderings for iterated subsets
+# of non-dependent items.
+#from sqlalchemy.test.util import RandomSet as set
 
 __all__ = ['sort', 'sort_as_subsets', 'find_cycles']
 
index 216c10f1a3a55ce99cb54188a90643f599af71d0..f8116706266e856e5287b957fc7b8c7635d63024 100644 (file)
@@ -420,7 +420,7 @@ class ReversePKsTest(_base.MappedTest):
         assert session.query(User).get([1, EDITABLE]) is a_editable
 
     
-class SelfRefTest(_base.MappedTest):
+class SelfReferentialTest(_base.MappedTest):
     __unsupported_on__ = ('mssql',) # mssql doesn't allow ON UPDATE on self-referential keys
 
     @classmethod
@@ -441,7 +441,7 @@ class SelfRefTest(_base.MappedTest):
             pass
 
     @testing.resolve_artifact_names
-    def test_onetomany(self):
+    def test_one_to_many(self):
         mapper(Node, nodes, properties={
             'children': relationship(Node,
                                  backref=sa.orm.backref('parentnode',
@@ -465,6 +465,30 @@ class SelfRefTest(_base.MappedTest):
              for n in sess.query(Node).filter(
                  Node.name.in_(['n11', 'n12', 'n13']))])
 
+    @testing.resolve_artifact_names
+    def test_many_to_one(self):
+        mapper(Node, nodes, properties={
+            'parentnode':relationship(Node, 
+                            remote_side=nodes.c.name, 
+                            passive_updates=True)
+            }
+        )
+
+        sess = create_session()
+        n1 = Node(name='n1')
+        n11 = Node(name='n11', parentnode=n1)
+        n12 = Node(name='n12', parentnode=n1)
+        n13 = Node(name='n13', parentnode=n1)
+        sess.add_all([n1, n11, n12, n13])
+        sess.flush()
+
+        n1.name = 'new n1'
+        sess.flush()
+        eq_(['new n1', 'new n1', 'new n1'],
+             [n.parent
+              for n in sess.query(Node).filter(
+                  Node.name.in_(['n11', 'n12', 'n13']))])
+
 
 class NonPKCascadeTest(_base.MappedTest):
     @classmethod
index 42d9fd90f3a5dbd1bf53fc114bc1ccb89a98fce5..e8b7d98370aa9b51e090eaf985d1eb375ebb3ace 100644 (file)
@@ -1,6 +1,8 @@
 from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
 from sqlalchemy.test import testing
-from test.orm import _fixtures
+from sqlalchemy.test.schema import Table, Column
+from sqlalchemy import Integer, String, ForeignKey
+from test.orm import _fixtures, _base
 from sqlalchemy.orm import mapper, relationship, backref, create_session
 from sqlalchemy.test.assertsql import AllOf, CompiledSQL
 
@@ -385,4 +387,155 @@ class SingleCycleTest(UOWTest):
 #                testing.db,
  #               sess.flush,
  #       )
+
+class SingleCycleM2MTest(_base.MappedTest, testing.AssertsExecutionResults):
+
+    @classmethod
+    def define_tables(cls, metadata):
+        nodes = Table('nodes', metadata,
+            Column('id', Integer, 
+                            primary_key=True, 
+                            test_needs_autoincrement=True),
+            Column('data', String(30)),
+            Column('favorite_node_id', Integer, ForeignKey('nodes.id'))
+        )
+        
+        node_to_nodes = Table('node_to_nodes', metadata,
+            Column('left_node_id', Integer, 
+                            ForeignKey('nodes.id'),primary_key=True),
+            Column('right_node_id', Integer, 
+                            ForeignKey('nodes.id'),primary_key=True),
+            )
+    
+    @testing.resolve_artifact_names
+    def test_many_to_many_one(self):
+        class Node(Base):
+            pass
+        
+        mapper(Node, nodes, properties={
+            'children':relationship(Node, secondary=node_to_nodes,
+                primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id,
+                secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id,
+                backref='parents'
+            ),
+            'favorite':relationship(Node, remote_side=nodes.c.id)
+        })
+        
+        sess = create_session()
+        n1 = Node(data='n1')
+        n2 = Node(data='n2')
+        n3 = Node(data='n3')
+        n4 = Node(data='n4')
+        n5 = Node(data='n5')
+        
+        n4.favorite = n3
+        n1.favorite = n5
+        n5.favorite = n2
+        
+        n1.children = [n2, n3, n4]
+        n2.children = [n3, n5]
+        n3.children = [n5, n4]
+        
+        sess.add_all([n1, n2, n3, n4, n5])
+        self.assert_sql_execution(
+                testing.db,
+                sess.flush,
+
+                CompiledSQL(
+                    "INSERT INTO nodes (data, favorite_node_id) "
+                    "VALUES (:data, :favorite_node_id)",
+                    {'data': 'n2', 'favorite_node_id': None}
+                ),
+                CompiledSQL(
+                    "INSERT INTO nodes (data, favorite_node_id) "
+                    "VALUES (:data, :favorite_node_id)", 
+                    {'data': 'n3', 'favorite_node_id': None}),
+                CompiledSQL("INSERT INTO nodes (data, favorite_node_id) "
+                            "VALUES (:data, :favorite_node_id)", 
+                    lambda ctx:{'data': 'n5', 'favorite_node_id': n2.id}),
+                CompiledSQL(
+                    "INSERT INTO nodes (data, favorite_node_id) "
+                    "VALUES (:data, :favorite_node_id)", 
+                    lambda ctx:{'data': 'n4', 'favorite_node_id': n3.id}),
+                CompiledSQL(
+                    "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
+                    "VALUES (:left_node_id, :right_node_id)", 
+                    lambda ctx:[
+                        {'right_node_id': n5.id, 'left_node_id': n3.id}, 
+                        {'right_node_id': n4.id, 'left_node_id': n3.id}, 
+                        {'right_node_id': n3.id, 'left_node_id': n2.id}, 
+                        {'right_node_id': n5.id, 'left_node_id': n2.id}
+                    ]
+                    ),
+                CompiledSQL(
+                    "INSERT INTO nodes (data, favorite_node_id) "
+                    "VALUES (:data, :favorite_node_id)", 
+                    lambda ctx:[{'data': 'n1', 'favorite_node_id': n5.id}]
+                ),
+                CompiledSQL(
+                    "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
+                    "VALUES (:left_node_id, :right_node_id)", 
+                    lambda ctx:[
+                        {'right_node_id': n2.id, 'left_node_id': n1.id}, 
+                        {'right_node_id': n3.id, 'left_node_id': n1.id}, 
+                        {'right_node_id': n4.id, 'left_node_id': n1.id}
+                    ])
+            )
+
+        sess.delete(n1)
+        
+        self.assert_sql_execution(
+                testing.db,
+                sess.flush,
+                CompiledSQL(
+                    "DELETE FROM node_to_nodes WHERE "
+                    "node_to_nodes.left_node_id = :left_node_id AND "
+                    "node_to_nodes.right_node_id = :right_node_id",
+                    lambda ctx:[
+                        {'right_node_id': n2.id, 'left_node_id': n1.id}, 
+                        {'right_node_id': n3.id, 'left_node_id': n1.id}, 
+                        {'right_node_id': n4.id, 'left_node_id': n1.id}
+                    ]
+                ),
+                CompiledSQL(
+                    "DELETE FROM nodes WHERE nodes.id = :id",
+                    lambda ctx:{'id': n1.id}
+                ),
+        )
+        
+        for n in [n2, n3, n4, n5]:
+            sess.delete(n)
+            
+        # load these collections
+        # outside of the flush() below
+        n4.children
+        n5.children
+        
+        self.assert_sql_execution(
+            testing.db,
+            sess.flush,
+            CompiledSQL(
+                "DELETE FROM node_to_nodes WHERE node_to_nodes.left_node_id "
+                "= :left_node_id AND node_to_nodes.right_node_id = "
+                ":right_node_id",
+                lambda ctx:[
+                    {'right_node_id': n5.id, 'left_node_id': n3.id}, 
+                    {'right_node_id': n4.id, 'left_node_id': n3.id}, 
+                    {'right_node_id': n3.id, 'left_node_id': n2.id}, 
+                    {'right_node_id': n5.id, 'left_node_id': n2.id}
+                ]
+            ),
+            CompiledSQL(
+                "DELETE FROM nodes WHERE nodes.id = :id",
+                lambda ctx:[{'id': n4.id}, {'id': n5.id}]
+            ),
+            CompiledSQL(
+                "DELETE FROM nodes WHERE nodes.id = :id",
+                lambda ctx:[{'id': n2.id}, {'id': n3.id}]
+            ),
+        )
+        
+        
+        
+        
         
\ No newline at end of file