"""
if self.post_update and self._check_reverse(uow):
- return
+ # TODO: coverage here
+ return iter([])
# locate and disable the aggregate processors
# for this dependency
])
def per_state_flush_actions(self, uow, states, isdelete):
    """Return the per-state flush actions for this dependency.

    This variant emits no actions at all: it returns an empty
    iterator (rather than ``None``) so that callers can iterate
    the result unconditionally.
    """
    # TODO: coverage here
    return iter([])
def presort_deletes(self, uowcommit, states):
    # Deliberately unreachable: this processor variant is never
    # expected to presort deletes.  NOTE(review): the enclosing
    # class is not visible in this chunk -- confirm this path is
    # truly dead before relying on it.
    assert False
def per_state_flush_actions(self, uow, states, isdelete):
    """Return per-state flush actions, or an empty iterator when
    the reverse processor already covers this dependency.

    Returning ``iter([])`` instead of ``None`` keeps the result
    iterable for all callers.
    """
    if self._check_reverse(uow):
        # the reverse relationship's processor handles these
        # states; emit no duplicate actions
        return iter([])
    else:
        # delegate to the generic base implementation
        return DependencyProcessor.\
            per_state_flush_actions(self, uow, states, isdelete)
def per_property_dependencies(self, uow, parent_saves,
after_save, before_delete,
isdelete, childisdelete):
if not isdelete:
- uow.dependencies.update([
- (save_parent, after_save),
- (after_save, child_action),
- (save_parent, child_action)
- ])
+ if childisdelete:
+ uow.dependencies.update([
+ (save_parent, after_save),
+ (after_save, child_action),
+ ])
+ else:
+ uow.dependencies.update([
+ (save_parent, after_save),
+ (child_action, after_save),
+ ])
else:
uow.dependencies.update([
(before_delete, child_action),
- (child_action, delete_parent)
+ (before_delete, delete_parent)
])
def presort_deletes(self, uowcommit, states):
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""The internals for the Unit Of Work system.
+"""The internals for the unit of work system.
-Includes hooks into the attributes package enabling the routing of
-change events to Unit Of Work objects, as well as the flush()
-mechanism which creates a dependency structure that executes change
-operations.
-
-A Unit of Work is essentially a system of maintaining a graph of
-in-memory objects and their modified state. Objects are maintained as
-unique against their primary key identity using an *identity map*
-pattern. The Unit of Work then maintains lists of objects that are
-new, dirty, or deleted and provides the capability to flush all those
-changes at once.
+The session's flush() process passes objects to a contextual object
+here, which assembles flush tasks based on mappers and their properties,
+organizes them in order of dependency, and executes.
"""
class UOWTransaction(object):
- """Handles the details of organizing and executing transaction
- tasks during a UnitOfWork object's flush() operation.
-
- """
-
def __init__(self, session):
self.session = session
self.mapper_flush_opts = session._mapper_flush_opts
assert session.query(User).get([1, EDITABLE]) is a_editable
-class SelfRefTest(_base.MappedTest):
+class SelfReferentialTest(_base.MappedTest):
__unsupported_on__ = ('mssql',) # mssql doesn't allow ON UPDATE on self-referential keys
@classmethod
pass
@testing.resolve_artifact_names
- def test_onetomany(self):
+ def test_one_to_many(self):
mapper(Node, nodes, properties={
'children': relationship(Node,
backref=sa.orm.backref('parentnode',
for n in sess.query(Node).filter(
Node.name.in_(['n11', 'n12', 'n13']))])
@testing.resolve_artifact_names
def test_many_to_one(self):
    # Self-referential many-to-one with passive_updates=True:
    # renaming the parent's primary key should be reflected in the
    # children's foreign-key column after flush.  NOTE(review):
    # this presumably relies on the FK being declared ON UPDATE
    # CASCADE -- the table definition is not visible in this chunk.
    mapper(Node, nodes, properties={
        'parentnode':relationship(Node,
            remote_side=nodes.c.name,
            passive_updates=True)
        }
    )

    sess = create_session()
    n1 = Node(name='n1')
    n11 = Node(name='n11', parentnode=n1)
    n12 = Node(name='n12', parentnode=n1)
    n13 = Node(name='n13', parentnode=n1)
    sess.add_all([n1, n11, n12, n13])
    sess.flush()

    # rename the parent's key; after flush, each child's 'parent'
    # column (the FK value) should carry the new name
    n1.name = 'new n1'
    sess.flush()
    eq_(['new n1', 'new n1', 'new n1'],
        [n.parent
         for n in sess.query(Node).filter(
            Node.name.in_(['n11', 'n12', 'n13']))])
+
class NonPKCascadeTest(_base.MappedTest):
@classmethod
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.test import testing
-from test.orm import _fixtures
+from sqlalchemy.test.schema import Table, Column
+from sqlalchemy import Integer, String, ForeignKey
+from test.orm import _fixtures, _base
from sqlalchemy.orm import mapper, relationship, backref, create_session
from sqlalchemy.test.assertsql import AllOf, CompiledSQL
# testing.db,
# sess.flush,
# )
+
class SingleCycleM2MTest(_base.MappedTest, testing.AssertsExecutionResults):
    """Flush ordering for a self-referential many-to-many cycle.

    A single ``nodes`` table is related to itself both through an
    association table (``node_to_nodes``) and through a
    self-referential many-to-one (``favorite_node_id``); the test
    asserts the exact SQL statements and their order on insert and
    delete.
    """

    @classmethod
    def define_tables(cls, metadata):
        nodes = Table('nodes', metadata,
            Column('id', Integer,
                   primary_key=True,
                   test_needs_autoincrement=True),
            Column('data', String(30)),
            # self-referential many-to-one
            Column('favorite_node_id', Integer, ForeignKey('nodes.id'))
        )

        # association table for the self-referential many-to-many
        node_to_nodes = Table('node_to_nodes', metadata,
            Column('left_node_id', Integer,
                   ForeignKey('nodes.id'), primary_key=True),
            Column('right_node_id', Integer,
                   ForeignKey('nodes.id'), primary_key=True),
        )

    @testing.resolve_artifact_names
    def test_many_to_many_one(self):
        # NOTE(review): ``Base`` is not visible in this chunk --
        # presumably supplied by the test fixture machinery; verify.
        class Node(Base):
            pass

        mapper(Node, nodes, properties={
            'children':relationship(Node, secondary=node_to_nodes,
                primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id,
                secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id,
                backref='parents'
            ),
            'favorite':relationship(Node, remote_side=nodes.c.id)
        })

        sess = create_session()
        n1 = Node(data='n1')
        n2 = Node(data='n2')
        n3 = Node(data='n3')
        n4 = Node(data='n4')
        n5 = Node(data='n5')

        # the 'favorite' chain constrains the INSERT order below:
        # a node must exist before another node can reference it
        n4.favorite = n3
        n1.favorite = n5
        n5.favorite = n2

        n1.children = [n2, n3, n4]
        n2.children = [n3, n5]
        n3.children = [n5, n4]

        sess.add_all([n1, n2, n3, n4, n5])
        self.assert_sql_execution(
            testing.db,
            sess.flush,

            CompiledSQL(
                "INSERT INTO nodes (data, favorite_node_id) "
                "VALUES (:data, :favorite_node_id)",
                {'data': 'n2', 'favorite_node_id': None}
            ),
            CompiledSQL(
                "INSERT INTO nodes (data, favorite_node_id) "
                "VALUES (:data, :favorite_node_id)",
                {'data': 'n3', 'favorite_node_id': None}),
            CompiledSQL("INSERT INTO nodes (data, favorite_node_id) "
                "VALUES (:data, :favorite_node_id)",
                lambda ctx:{'data': 'n5', 'favorite_node_id': n2.id}),
            CompiledSQL(
                "INSERT INTO nodes (data, favorite_node_id) "
                "VALUES (:data, :favorite_node_id)",
                lambda ctx:{'data': 'n4', 'favorite_node_id': n3.id}),
            CompiledSQL(
                "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
                "VALUES (:left_node_id, :right_node_id)",
                lambda ctx:[
                    {'right_node_id': n5.id, 'left_node_id': n3.id},
                    {'right_node_id': n4.id, 'left_node_id': n3.id},
                    {'right_node_id': n3.id, 'left_node_id': n2.id},
                    {'right_node_id': n5.id, 'left_node_id': n2.id}
                ]
            ),
            CompiledSQL(
                "INSERT INTO nodes (data, favorite_node_id) "
                "VALUES (:data, :favorite_node_id)",
                lambda ctx:[{'data': 'n1', 'favorite_node_id': n5.id}]
            ),
            CompiledSQL(
                "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
                "VALUES (:left_node_id, :right_node_id)",
                lambda ctx:[
                    {'right_node_id': n2.id, 'left_node_id': n1.id},
                    {'right_node_id': n3.id, 'left_node_id': n1.id},
                    {'right_node_id': n4.id, 'left_node_id': n1.id}
                ])
            )

        # deleting one node: its association rows must be deleted
        # before the node row itself
        sess.delete(n1)

        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "DELETE FROM node_to_nodes WHERE "
                "node_to_nodes.left_node_id = :left_node_id AND "
                "node_to_nodes.right_node_id = :right_node_id",
                lambda ctx:[
                    {'right_node_id': n2.id, 'left_node_id': n1.id},
                    {'right_node_id': n3.id, 'left_node_id': n1.id},
                    {'right_node_id': n4.id, 'left_node_id': n1.id}
                ]
            ),
            CompiledSQL(
                "DELETE FROM nodes WHERE nodes.id = :id",
                lambda ctx:{'id': n1.id}
            ),
        )

        for n in [n2, n3, n4, n5]:
            sess.delete(n)

        # load these collections
        # outside of the flush() below
        n4.children
        n5.children

        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "DELETE FROM node_to_nodes WHERE node_to_nodes.left_node_id "
                "= :left_node_id AND node_to_nodes.right_node_id = "
                ":right_node_id",
                lambda ctx:[
                    {'right_node_id': n5.id, 'left_node_id': n3.id},
                    {'right_node_id': n4.id, 'left_node_id': n3.id},
                    {'right_node_id': n3.id, 'left_node_id': n2.id},
                    {'right_node_id': n5.id, 'left_node_id': n2.id}
                ]
            ),
            CompiledSQL(
                "DELETE FROM nodes WHERE nodes.id = :id",
                lambda ctx:[{'id': n4.id}, {'id': n5.id}]
            ),
            CompiledSQL(
                "DELETE FROM nodes WHERE nodes.id = :id",
                lambda ctx:[{'id': n2.id}, {'id': n3.id}]
            ),
        )
+
+
+
+
\ No newline at end of file