git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
ordering of UPDATE and DELETE statements within groups is now
author: Mike Bayer <mike_mp@zzzcomputing.com>
Mon, 3 Jul 2006 15:55:20 +0000 (15:55 +0000)
committer: Mike Bayer <mike_mp@zzzcomputing.com>
Mon, 3 Jul 2006 15:55:20 +0000 (15:55 +0000)
in order of primary key values, for more deterministic ordering
after_insert/delete/update mapper extensions now called per object,
not per-object-per-table
fixed import in firebird.py

CHANGES
lib/sqlalchemy/databases/firebird.py
lib/sqlalchemy/orm/dependency.py
lib/sqlalchemy/orm/mapper.py
test/orm/objectstore.py

diff --git a/CHANGES b/CHANGES
index 04a841ed674c2b39cbeacdc037597a17270fdcef..ee17260345625c9dd489d9c1f1a7f9281b3f6b15 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,10 @@ working around new setuptools PYTHONPATH-killing behavior
 - further fixes with attributes/dependencies/etc....
 - improved error handling for when DynamicMetaData is not connected
 - MS-SQL support largely working (tested with pymssql)
+- ordering of UPDATE and DELETE statements within groups is now 
+in order of primary key values, for more deterministic ordering
+- after_insert/delete/update mapper extensions now called per object,
+not per-object-per-table
 
 0.2.4
 - try/except when the mapper sets init.__name__ on a mapped class,
index 42779e8ffbf822d70952953f820c18c12ef1b3b4..0039333d51205d0103dd8afd006c24c5f545d138 100644 (file)
@@ -8,7 +8,7 @@
 import sys, StringIO, string, types
 
 import sqlalchemy.engine.default as default
-import sqlalchemy.sql as sql
+import sqlalchemy.sql as sql
 import sqlalchemy.schema as schema
 import sqlalchemy.ansisql as ansisql
 # from sqlalchemy import *
index 8970dab50bcb4a60eee9d6ce3662152c882d6ac3..1af368431bc7d5097078a445c4313251c85bf000 100644 (file)
@@ -279,6 +279,7 @@ class ManyToManyDP(DependencyProcessor):
                     self._synchronize(obj, child, associationrow, False)
                     secondary_delete.append(associationrow)
         if len(secondary_delete):
+            secondary_delete.sort()
             # TODO: precompile the delete/insert queries and store them as instance variables
             # on the PropertyLoader
             statement = self.secondary.delete(sql.and_(*[c == sql.bindparam(c.key) for c in self.secondary.c if c.key in associationrow]))
index b6c5dc56dd908f0abf0aeedc162fb152480ca630..120c5f209436cb8b1fedad70f837d7e424b45f7a 100644 (file)
@@ -742,7 +742,6 @@ class Mapper(object):
 
     def _setattrbycolumn(self, obj, column, value):
         self.columntoproperty[column][0].setattr(obj, value)
-    
             
     def save_obj(self, objects, uow, postupdate=False):
         """called by a UnitOfWork object to save objects, which involves either an INSERT or
@@ -750,6 +749,16 @@ class Mapper(object):
         list."""
         #print "SAVE_OBJ MAPPER", self.class_.__name__, objects
         connection = uow.transaction.connection(self)
+
+        if not postupdate:
+            for obj in objects:
+                if not hasattr(obj, "_instance_key"):
+                    self.extension.before_insert(self, connection, obj)
+                else:
+                    self.extension.before_update(self, connection, obj)
+
+        inserted_objects = util.Set()
+        updated_objects = util.Set()
         for table in self.tables.sort(reverse=False):
             #print "SAVE_OBJ table ", self.class_.__name__, table.name
             # looping through our set of tables, which are all "real" tables, as opposed
@@ -857,14 +866,24 @@ class Mapper(object):
                 statement = table.update(clause)
                 rows = 0
                 supports_sane_rowcount = True
+                def comparator(a, b):
+                    for col in self.pks_by_table[table]:
+                        x = cmp(a[1][col._label],b[1][col._label])
+                        if x != 0:
+                            return x
+                    return 0
+                update.sort(comparator)
                 for rec in update:
                     (obj, params) = rec
                     c = connection.execute(statement, params)
                     self._postfetch(connection, table, obj, c, c.last_updated_params())
-                    self.extension.after_update(self, connection, obj)
+
+                    updated_objects.add(obj)
                     rows += c.cursor.rowcount
+
                 if c.supports_sane_rowcount() and rows != len(update):
                     raise exceptions.FlushError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (rows, len(update)))
+
             if len(insert):
                 statement = table.insert()
                 for rec in insert:
@@ -888,7 +907,10 @@ class Mapper(object):
                             mapper._synchronizer.execute(obj, obj)
                     sync(self)
                     
-                    self.extension.after_insert(self, connection, obj)
+                    inserted_objects.add(obj)
+        if not postupdate:
+            [self.extension.after_insert(self, connection, obj) for obj in inserted_objects]
+            [self.extension.after_update(self, connection, obj) for obj in updated_objects]
 
     def _postfetch(self, connection, table, obj, resultproxy, params):
         """after an INSERT or UPDATE, asks the returned result if PassiveDefaults fired off on the database side
@@ -917,12 +939,13 @@ class Mapper(object):
         DELETE statement for each table used by this mapper, for each object in the list."""
         connection = uow.transaction.connection(self)
         #print "DELETE_OBJ MAPPER", self.class_.__name__, objects
-        
+
+        [self.extension.before_delete(self, connection, obj) for obj in objects]
+        deleted_objects = util.Set()
         for table in self.tables.sort(reverse=True):
             if not self._has_pks(table):
                 continue
             delete = []
-            deleted_objects = []
             for obj in objects:
                 params = {}
                 if not hasattr(obj, "_instance_key"):
@@ -933,9 +956,15 @@ class Mapper(object):
                     params[col.key] = self._getattrbycolumn(obj, col)
                 if self.version_id_col is not None:
                     params[self.version_id_col.key] = self._getattrbycolumn(obj, self.version_id_col)
-                self.extension.before_delete(self, connection, obj)
-                deleted_objects.append(obj)
+                deleted_objects.add(obj)
             if len(delete):
+                def comparator(a, b):
+                    for col in self.pks_by_table[table]:
+                        x = cmp(a[col.key],b[col.key])
+                        if x != 0:
+                            return x
+                    return 0
+                delete.sort(comparator)
                 clause = sql.and_()
                 for col in self.pks_by_table[table]:
                     clause.clauses.append(col == sql.bindparam(col.key, type=col.type))
@@ -945,8 +974,8 @@ class Mapper(object):
                 c = connection.execute(statement, delete)
                 if c.supports_sane_rowcount() and c.rowcount != len(delete):
                     raise exceptions.FlushError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (c.cursor.rowcount, len(delete)))
-                for obj in deleted_objects:
-                    self.extension.after_delete(self, connection, obj)
+                    
+        [self.extension.after_delete(self, connection, obj) for obj in deleted_objects]
 
     def _has_pks(self, table):
         try:
index c2ef112a649d717f2bdeb2008636c1e247586772..6d3a712762711d7fc2eed6663792a3cab359fed6 100644 (file)
@@ -923,13 +923,13 @@ class SaveTest(SessionTest):
                         "UPDATE users SET user_name=:user_name WHERE users.user_id = :users_user_id",
                         {'users_user_id': u2.user_id, 'user_name': 'user2modified'}
                     ),
+                    ("UPDATE email_addresses SET user_id=:user_id WHERE email_addresses.address_id = :email_addresses_address_id",
+                        {'user_id': None, 'email_addresses_address_id': a1.address_id}
+                    ),
                     (
                         "UPDATE email_addresses SET user_id=:user_id WHERE email_addresses.address_id = :email_addresses_address_id",
                         {'user_id': u1.user_id, 'email_addresses_address_id': a3.address_id}
                     ),
-                    ("UPDATE email_addresses SET user_id=:user_id WHERE email_addresses.address_id = :email_addresses_address_id",
-                        {'user_id': None, 'email_addresses_address_id': a1.address_id}
-                    )
                 ])
 
     def testbackwardsmanipulations(self):