author     Mike Bayer <mike_mp@zzzcomputing.com>
           Wed, 27 Sep 2006 05:08:22 +0000 (05:08 +0000)
committer  Mike Bayer <mike_mp@zzzcomputing.com>
           Wed, 27 Sep 2006 05:08:22 +0000 (05:08 +0000)

- added an automatic "row switch" feature to mapping, which will
detect a pending instance/deleted instance pair with the same
identity key and convert the INSERT/DELETE to a single UPDATE
- "association" mappings simplified to take advantage of
automatic "row switch" feature
- fixes [ticket:311]
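A minimal sketch of the behavior this enables (commentary, not commit text; 'Item' and the session setup are illustrative, and the save()/delete()/flush() calls assume the 0.2-era Session API):

    old = session.query(Item).get(1)    # persistent, identity key (Item, (1,))
    session.delete(old)                 # marked for DELETE

    new = Item(id=1, name="replacement")
    session.save(new)                   # pending, same identity key as 'old'

    # with row switch, flush() detects the pending/deleted pair and emits
    # a single UPDATE against the existing row instead of DELETE + INSERT
    session.flush()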

CHANGES
lib/sqlalchemy/orm/dependency.py
lib/sqlalchemy/orm/mapper.py
lib/sqlalchemy/orm/unitofwork.py

diff --git a/CHANGES b/CHANGES
index 65946b4c4986f43c4dc1ca9933a594ce04be5f9f..3139c57f09ce57a3499cf5b5b9b1765258e9d9c3 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -10,15 +10,22 @@ Test suite includes "--log-info" and "--log-debug" arguments
 which work independently of --verbose/--quiet.  Logging added
 to orm to allow tracking of mapper configurations, row iteration.
 - updates to MS-SQL driver: 
-   -- fixes bug 261 (table reflection broken for MS-SQL case-sensitive databases)
+   -- fixes bug 261 (table reflection broken for MS-SQL case-sensitive 
+   databases)
    -- can now specify port for pymssql
-   -- introduces new "auto_identity_insert" option for auto-switching between "SET IDENTITY_INSERT" mode when values specified for IDENTITY columns 
+   -- introduces new "auto_identity_insert" option for auto-switching 
+   between "SET IDENTITY_INSERT" mode when values specified for IDENTITY columns 
    -- now supports multi-column foreign keys
    -- fix to reflecting date/datetime columns
    -- NCHAR and NVARCHAR type support added
 - more rearrangements of unit-of-work commit scheme to better allow
 dependencies within circular flushes to work properly...updated
 task traversal/logging implementation
+- added an automatic "row switch" feature to mapping, which will
+detect a pending instance/deleted instance pair with the same 
+identity key and convert the INSERT/DELETE to a single UPDATE
+- "association" mappings simplified to take advantage of 
+automatic "row switch" feature
 - changed "for_update" parameter to accept False/True/"nowait"
 and "read", the latter two of which are interpreted only by
 Oracle and Mysql [ticket:292]
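As a quick illustration of the extended "for_update" values in the entry above (a hedged sketch; the "orders" table is hypothetical), the parameter passes through select():

    orders.select(orders.c.status == 'open', for_update=True)      # SELECT ... FOR UPDATE
    orders.select(orders.c.status == 'open', for_update='nowait')  # FOR UPDATE NOWAIT (Oracle)
    orders.select(orders.c.status == 'open', for_update='read')    # LOCK IN SHARE MODE (MySQL)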
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py
index b734b34becc510d13f5e3a1b1599e0e24ae9d148..4deb8297f7568cc8e376fdafff9e4282a9c83366 100644 (file)
--- a/lib/sqlalchemy/orm/dependency.py
+++ b/lib/sqlalchemy/orm/dependency.py
@@ -301,72 +301,10 @@ class ManyToManyDP(DependencyProcessor):
         self.syncrules.execute(source, dest, obj, child, clearkeys)
 
 class AssociationDP(OneToManyDP):
-    def register_dependencies(self, uowcommit):
-        # association object.  our mapper should be dependent on both
-        # the parent mapper and the association object mapper.
-        # this is where we put the "stub" as a marker, so we get
-        # association/parent->stub->self, then we process the child
-        # elments after the 'stub' save, which is before our own
-        # mapper's save.
-        stub = MapperStub(self.parent, self.association, self.key)
-        uowcommit.register_dependency(self.parent, stub)
-        uowcommit.register_dependency(self.association, stub)
-        uowcommit.register_dependency(stub, self.mapper)
-        uowcommit.register_processor(stub, self, self.parent)
-    def process_dependencies(self, task, deplist, uowcommit, delete = False):
-        #print self.mapper.table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
-        for obj in deplist:
-            childlist = self.get_object_dependencies(obj, uowcommit, passive=True)
-            if childlist is None: continue
-
-            # for the association mapper, the list of association objects is organized into a unique list based on the
-            # "primary key".  newly added association items which correspond to existing association items are "merged"
-            # into the existing one by moving the "_instance_key" over to the added item, so instead of insert/delete you
-            # just get an update operation.
-            if not delete:
-                tosave = util.OrderedDict()
-                for child in childlist:
-                    self._synchronize(obj, child, None, False)
-                    key = self.mapper.instance_key(child)
-                    tosave[key] = child
-                    uowcommit.unregister_object(child)
-
-                todelete = {}
-                for child in childlist.deleted_items():
-                    self._synchronize(obj, child, None, False)
-                    key = self.mapper.instance_key(child)
-                    if not tosave.has_key(key):
-                        todelete[key] = child
-                    else:
-                        tosave[key]._instance_key = key
-                    uowcommit.unregister_object(child)
-                
-                for child in childlist.unchanged_items():
-                    key = self.mapper.instance_key(child)
-                    tosave[key]._instance_key = key
-                    
-                #print "OK for the save", [(o, getattr(o, '_instance_key', None)) for o in tosave.values()]
-                #print "OK for the delete", [(o, getattr(o, '_instance_key', None)) for o in todelete.values()]
-                
-                for obj in tosave.values():
-                    uowcommit.register_object(obj)
-                for obj in todelete.values():
-                    uowcommit.register_object(obj, isdelete=True)
-            else:
-                todelete = {}
-                for child in childlist.unchanged_items() + childlist.deleted_items():
-                    self._synchronize(obj, child, None, False)
-                    key = self.mapper.instance_key(child)
-                    todelete[key] = child
-                for obj in todelete.values():
-                    uowcommit.register_object(obj, isdelete=True)
-                    
-                
-    def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
-        # TODO: clean up the association step in process_dependencies and move the
-        # appropriate sections of it to here
-        pass
-        
+    def __init__(self, *args, **kwargs):
+        super(AssociationDP, self).__init__(*args, **kwargs)
+        self.cascade.delete = True
+        self.cascade.delete_orphan = True
 
 class MapperStub(object):
     """poses as a Mapper representing the association table in a many-to-many
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index bf3d97f3f96948c29664a56a91372aae5053a3ff..29688df21e282d9ede95a0824366a85b657f8af9 100644 (file)
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -282,6 +282,7 @@ class Mapper(object):
             if self.order_by is False:
                 self.order_by = self.inherits.order_by
             self.polymorphic_map = self.inherits.polymorphic_map
+            self.batch = self.inherits.batch
         else:
             self._synchronizer = None
             self.mapped_table = self.local_table
@@ -719,19 +720,19 @@ class Mapper(object):
     def _setattrbycolumn(self, obj, column, value):
         self.columntoproperty[column][0].setattr(obj, value)
     
-    def save_obj(self, objects, uow, postupdate=False, post_update_cols=None, single=False):
+    def save_obj(self, objects, uowtransaction, postupdate=False, post_update_cols=None, single=False):
         """called by a UnitOfWork object to save objects, which involves either an INSERT or
         an UPDATE statement for each table used by this mapper, for each element of the
         list."""
-        #print "SAVE_OBJ MAPPER", self.class_.__name__, objects
+        self.__log_debug("save_obj() start, " + (single and "non-batched" or "batched"))
         
         # if batch=false, call save_obj separately for each object
         if not single and not self.batch:
             for obj in objects:
-                self.save_obj([obj], uow, postupdate=postupdate, post_update_cols=post_update_cols, single=True)
+                self.save_obj([obj], uowtransaction, postupdate=postupdate, post_update_cols=post_update_cols, single=True)
             return
             
-        connection = uow.transaction.connection(self)
+        connection = uowtransaction.transaction.connection(self)
 
         if not postupdate:
             for obj in objects:
@@ -766,13 +767,22 @@ class Mapper(object):
             # for this table, in the case that the user
             # specified custom primary key cols.
             for obj in objects:
-                #print "SAVE_OBJ we are Mapper(" + str(id(self)) + ") obj: " +  obj.__class__.__name__ + repr(id(obj))
+                instance_key = self.instance_key(obj)
+                self.__log_debug("save_obj() instance %s identity %s" % (mapperutil.instance_str(obj), str(instance_key)))
+
+                # detect if we have a "pending" instance (i.e. has no instance_key attached to it),
+                # and another instance with the same identity key already exists as persistent.  convert to an 
+                # UPDATE if so.
+                is_row_switch = not postupdate and not has_identity(obj) and instance_key in uowtransaction.uow.identity_map
+                if is_row_switch:
+                    existing = uowtransaction.uow.identity_map[instance_key]
+                    if not uowtransaction.is_deleted(existing):
+                        raise exceptions.FlushError("New instance %s with identity key %s conflicts with persistent instance %s" % (mapperutil.instance_str(obj), str(instance_key), mapperutil.instance_str(existing)))
+                    self.__log_debug("detected row switch for identity %s.  will update %s, remove %s from transaction" % (instance_key, mapperutil.instance_str(obj), mapperutil.instance_str(existing)))
+                    uowtransaction.unregister_object(existing)
+
+                isinsert = not is_row_switch and not postupdate and not has_identity(obj)
                 params = {}
-
-                # 'postupdate' means a PropertyLoader is telling us, "yes I know you 
-                # already inserted/updated this row but I need you to UPDATE one more 
-                # time"
-                isinsert = not postupdate and not has_identity(obj)
                 hasdata = False
                 for col in table.columns:
                     if col is self.version_id_col:
@@ -798,6 +808,7 @@ class Mapper(object):
                                 params[col.key] = value
                     elif self.polymorphic_on is not None and self.polymorphic_on.shares_lineage(col):
                         if isinsert:
+                            self.__log_debug("Using polymorphic identity '%s' for insert column '%s'" % (self.polymorphic_identity, col.key))
                             value = self.polymorphic_identity
                             if col.default is None or value is not None:
                                 params[col.key] = value
@@ -809,6 +820,10 @@ class Mapper(object):
                             # value, add it to the bind parameters
                             if post_update_cols is not None and col not in post_update_cols:
                                 continue
+                            elif is_row_switch:
+                                params[col.key] = self._getattrbycolumn(obj, col)
+                                hasdata = True
+                                continue
                             prop = self._getpropbycolumn(col, False)
                             if prop is None:
                                 continue
@@ -919,10 +934,10 @@ class Mapper(object):
                 elif v != params.get_original(c.name):
                     self._setattrbycolumn(obj, c, params.get_original(c.name))
 
-    def delete_obj(self, objects, uow):
+    def delete_obj(self, objects, uowtransaction):
         """called by a UnitOfWork object to delete objects, which involves a
         DELETE statement for each table used by this mapper, for each object in the list."""
-        connection = uow.transaction.connection(self)
+        connection = uowtransaction.transaction.connection(self)
         #print "DELETE_OBJ MAPPER", self.class_.__name__, objects
 
         [self.extension.before_delete(self, connection, obj) for obj in objects]
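The identity-map check in the new save_obj() code only performs the switch when the existing instance is itself marked deleted; otherwise the flush is an error. A hedged sketch of the failing case (names illustrative):

    existing = session.query(Item).get(1)   # persistent, not deleted
    dupe = Item(id=1)
    session.save(dupe)                      # pending, same identity key
    session.flush()
    # raises FlushError: "New instance <Item ...> with identity key ...
    # conflicts with persistent instance <Item ...>"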
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py
index 076b8088238db205834018eb8969937628d14bd8..008236877c098facbbcaca4277ea517282c10944 100644 (file)
--- a/lib/sqlalchemy/orm/unitofwork.py
+++ b/lib/sqlalchemy/orm/unitofwork.py
@@ -54,7 +54,7 @@ class UOWEventHandler(attributes.AttributeExtension):
                 sess.save_or_update(newvalue, entity_name=ename)
 
 class UOWProperty(attributes.InstrumentedAttribute):
-    """overrides InstrumentedAttribute to provide an extra AttributeExtension to all managed attributes
+    """override InstrumentedAttribute to provide an extra AttributeExtension to all managed attributes
     as well as the 'property' property."""
     def __init__(self, manager, class_, key, uselist, callable_, typecallable, cascade=None, extension=None, **kwargs):
         extension = util.to_list(extension or [])
@@ -65,12 +65,14 @@ class UOWProperty(attributes.InstrumentedAttribute):
     property = property(lambda s:class_mapper(s.class_).props[s.key], doc="returns the MapperProperty object associated with this property")
             
 class UOWAttributeManager(attributes.AttributeManager):
-    """overrides AttributeManager to provide the UOWProperty instance for all InstrumentedAttributes."""
+    """override AttributeManager to provide the UOWProperty instance for all InstrumentedAttributes."""
     def create_prop(self, class_, key, uselist, callable_, typecallable, **kwargs):
         return UOWProperty(self, class_, key, uselist, callable_, typecallable, **kwargs)
 
 class UnitOfWork(object):
-    """main UOW object which stores lists of dirty/new/deleted objects.  provides top-level "flush" functionality as well as the transaction boundaries with the SQLEngine(s) involved in a write operation."""
+    """main UOW object which stores lists of dirty/new/deleted objects.  
+    provides top-level "flush" functionality as well as the transaction 
+    boundaries with the SQLEngine(s) involved in a write operation."""
     def __init__(self, identity_map=None):
         if identity_map is not None:
             self.identity_map = identity_map
@@ -159,6 +161,7 @@ class UnitOfWork(object):
         # store objects whose fate has been decided
         processed = util.Set()
         
+        
         # put all saves/updates into the flush context.  detect orphans and throw them into deleted.
         for obj in self.new.union(dirty).intersection(objset).difference(self.deleted):
             if obj in processed:
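The set expression in that loop reads: candidates for INSERT/UPDATE are objects that are new or dirty, restricted to the objects participating in this flush, minus anything already marked deleted. A toy illustration of the arithmetic (plain Python, nothing from the codebase):

    new, dirty, objset, deleted = {1, 2}, {2, 3}, {1, 2, 3}, {3}
    candidates = new.union(dirty).intersection(objset).difference(deleted)
    assert candidates == {1, 2}   # 3 is dirty but already slated for deletion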