git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
(no commit message)
author     Mike Bayer <mike_mp@zzzcomputing.com>
           Sun, 30 Oct 2005 01:09:38 +0000 (01:09 +0000)
committer  Mike Bayer <mike_mp@zzzcomputing.com>
           Sun, 30 Oct 2005 01:09:38 +0000 (01:09 +0000)
lib/sqlalchemy/databases/postgres.py
lib/sqlalchemy/mapper.py
lib/sqlalchemy/objectstore.py
lib/sqlalchemy/sql.py
lib/sqlalchemy/util.py
test/dependency.py
test/tables.py

diff --git a/lib/sqlalchemy/databases/postgres.py b/lib/sqlalchemy/databases/postgres.py
index 6a20e1704bc895aac0cf2db13c6f56c06931ce9a..ed98fedc43fd48ca2ec288a1d2fbb3efb15c784b 100644
--- a/lib/sqlalchemy/databases/postgres.py
+++ b/lib/sqlalchemy/databases/postgres.py
@@ -206,22 +206,21 @@ class PGSQLEngine(ansisql.ANSISQLEngine):
         constraints = gen_constraints.toengine(table.engine)
         column_constraints = gen_column_constraints.toengine(table.engine)
         
-        s = columns.select(columns.c.table_name==table.name, order_by=[columns.c.ordinal_position])
-
-        s.append_from(sql.join(columns, column_constraints, 
-                sql.and_(
-                        columns.c.table_name==column_constraints.c.table_name,
-                        columns.c.table_schema==column_constraints.c.table_schema,
-                        columns.c.column_name==column_constraints.c.column_name,
-                    ), 
-                isouter=True).join(constraints, 
-                    sql.and_(
-                        column_constraints.c.table_schema==constraints.c.table_schema,
-                        column_constraints.c.constraint_name==constraints.c.constraint_name,
-                        constraints.c.constraint_type=='PRIMARY KEY'
-                    ), isouter=True)),
-                    
-        s.append_column(constraints.c.constraint_type)    
+        s = select([columns, constraints.c.constraint_type], 
+            columns.c.table_name==table.name, 
+            order_by=[columns.c.ordinal_position],
+            from_obj=[sql.join(columns, column_constraints, 
+                              sql.and_(
+                                      columns.c.table_name==column_constraints.c.table_name,
+                                      columns.c.table_schema==column_constraints.c.table_schema,
+                                      columns.c.column_name==column_constraints.c.column_name,
+                                  ), 
+                              isouter=True).join(constraints, 
+                                  sql.and_(
+                                      column_constraints.c.table_schema==constraints.c.table_schema,
+                                      column_constraints.c.constraint_name==constraints.c.constraint_name,
+                                      constraints.c.constraint_type=='PRIMARY KEY'
+                                  ), isouter=True)])
 
         if table.schema is not None:
             s.append_whereclause(columns.c.table_schema==table.schema)
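
The rewritten reflection query above folds both outer joins and the extra constraint_type column into a single select() construct. As a rough sketch of the SQL it is aiming for, assuming the gen_* metadata tables wrap the standard information_schema views (columns, key_column_usage, table_constraints) -- an assumption, since their definitions live elsewhere in postgres.py -- with :table_name as a hypothetical bind parameter:

# Hedged sketch only: the information_schema view names and the
# :table_name bind parameter are assumptions, not taken from the diff.
REFLECTION_SQL = """
SELECT columns.*, constraints.constraint_type
FROM information_schema.columns AS columns
LEFT OUTER JOIN information_schema.key_column_usage AS column_constraints
    ON  columns.table_name   = column_constraints.table_name
    AND columns.table_schema = column_constraints.table_schema
    AND columns.column_name  = column_constraints.column_name
LEFT OUTER JOIN information_schema.table_constraints AS constraints
    ON  column_constraints.table_schema    = constraints.table_schema
    AND column_constraints.constraint_name = constraints.constraint_name
    AND constraints.constraint_type = 'PRIMARY KEY'
WHERE columns.table_name = :table_name
ORDER BY columns.ordinal_position
"""
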
diff --git a/lib/sqlalchemy/mapper.py b/lib/sqlalchemy/mapper.py
index 553ba5cb58f474e67a8b0712768ec292a0eb6c45..4fc5fbc8b412bb111292afa531b719572fe097a3 100644
--- a/lib/sqlalchemy/mapper.py
+++ b/lib/sqlalchemy/mapper.py
@@ -450,6 +450,7 @@ class Mapper(object):
                 if c.rowcount != len(update):
                     raise "ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (c.cursor.rowcount, len(update))
             if len(insert):
+                import sys
                 statement = table.insert()
                 for rec in insert:
                     (obj, params) = rec
@@ -458,8 +459,13 @@ class Mapper(object):
                     if primary_keys is not None:
                         i = 0
                         for col in self.primary_keys[table]:
+                    #        print "col: " + table.name + "." + col.key + " val: " + repr(self._getattrbycolumn(obj, col))
                             if self._getattrbycolumn(obj, col) is None:
-                                self._setattrbycolumn(obj, col, primary_keys[i])
+                                try:
+                                    self._setattrbycolumn(obj, col, primary_keys[i])
+                                except IndexError:
+                                    print "LALALA col: " + table.name + "." + col.key + " val: " + repr(self._getattrbycolumn(obj, col))
+                                    raise
                             i+=1
                     self.extension.after_insert(self, obj)
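
The try/except added above instruments the positional pairing of a table's primary-key columns with the keys returned after an INSERT; an IndexError means the cursor handed back fewer keys than the table has primary-key columns. A standalone sketch of that backfill step, with plain attributes standing in for the mapper's _getattrbycolumn/_setattrbycolumn helpers (hypothetical names, not the mapper's API):

def backfill_primary_keys(obj, pk_attrs, returned_keys):
    # Pair primary-key attributes with returned keys positionally and
    # fill in only those left None by the INSERT.
    for i, attr in enumerate(pk_attrs):
        if getattr(obj, attr) is None:
            # An IndexError here is exactly the mismatch the debug
            # print above is meant to surface.
            setattr(obj, attr, returned_keys[i])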
                     
diff --git a/lib/sqlalchemy/objectstore.py b/lib/sqlalchemy/objectstore.py
index 6081a7150d043fee4687184c7c1136aecfbf7c6f..5920e82c814d8f7054f0e6b5d9909b938d13038e 100644
--- a/lib/sqlalchemy/objectstore.py
+++ b/lib/sqlalchemy/objectstore.py
@@ -563,7 +563,7 @@ class UOWTask(object):
             s += "\n" + indent + "  Save Dependencies:"
             s += self._dump_dependencies(save_dep, indent)
         if len(self.childtasks) > 0:
-            s += "\n" + indent + "  Child Tasks:"
+            s += "\n" + indent + "  Child Tasks:(%d)" % len(self.childtasks)
             for t in self.childtasks:
                 s += t.dump(indent + "    ")
         delete_dep = self.delete_dependencies()
diff --git a/lib/sqlalchemy/sql.py b/lib/sqlalchemy/sql.py
index 2cb7d17bab7d056fb709417854591a85bf5f9848..72eea4241e7bb00df1c9dda6dab154e7703d753c 100644
--- a/lib/sqlalchemy/sql.py
+++ b/lib/sqlalchemy/sql.py
@@ -230,6 +230,10 @@ class ClauseElement(object):
         raise NotImplementedError(repr(self))
     def _get_from_objects(self):
         raise NotImplementedError(repr(self))
+    def _process_from_dict(self, data):
+        for f in self._get_from_objects():
+            data[f.id] = f
+        data[self.id] = self
     def accept_visitor(self, visitor):
         raise NotImplementedError(repr(self))
 
@@ -550,6 +554,11 @@ class Join(Selectable):
         visitor.visit_join(self)
 
     engine = property(lambda s:s.left.engine or s.right.engine)
+
+    def _process_from_dict(self, data):
+        for f in self._get_from_objects():
+            data[f.id] = f
+        data[self.id] = self
         
     def _get_from_objects(self):
         m = {}
@@ -763,14 +772,14 @@ class Select(Selectable):
         if type(fromclause) == str:
             fromclause = FromClause(from_name = fromclause)
 
+        fromclause._process_from_dict(self.froms)
+        return
+        
         self.froms[fromclause.id] = fromclause
 
         for r in fromclause._get_from_objects():
             self.froms[r.id] = r
         
-    def append_join(self, joinon, right, whereclause, **params):
-        self.append_from(self.froms[joinon], right, whereclause, **params)
-
     def append_clause(self, keyword, clause):
         if type(clause) == str:
             clause = TextClause(clause)
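
The new _process_from_dict centralizes the "register this clause plus everything it selects from, deduplicated by object id" step that append_from previously inlined (the early return above leaves the old inline version dead). A self-contained sketch of the pattern, with a hypothetical Clause class standing in for ClauseElement:

class Clause(object):
    # Hypothetical stand-in for ClauseElement, reduced to an id and a
    # list of underlying FROM objects.
    _counter = 0
    def __init__(self, froms=()):
        Clause._counter += 1
        self.id = Clause._counter
        self._froms = list(froms)
    def _get_from_objects(self):
        return self._froms
    def _process_from_dict(self, data):
        # Register each underlying FROM object, then the clause itself,
        # keyed by id so repeated registration collapses to one entry.
        for f in self._get_from_objects():
            data[f.id] = f
        data[self.id] = self

table = Clause()
join = Clause(froms=[table])
froms = {}
join._process_from_dict(froms)
join._process_from_dict(froms)  # idempotent: froms still has two entries
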
diff --git a/lib/sqlalchemy/util.py b/lib/sqlalchemy/util.py
index 2b0510f11b22aabae6f56986b6b0ad8d257b443e..3b006c220735cd1ba30cc72830448c98c2db50e1 100644
--- a/lib/sqlalchemy/util.py
+++ b/lib/sqlalchemy/util.py
@@ -380,17 +380,25 @@ class DependencySorter(object):
             # now see, if the parent is an ancestor of the child
             c = childnode
             while c is not None and c is not parentnode:
+                root = c
                 c = c.parent
 
             # nope, so we have to move the child down from wherever
             # it currently is to a child of the parent
             if c is None:
-                parentnode.append(childnode)
+                for c in parentnode.children:
+                    c.parent = root
+                    root.children.append(c)
+                    del parentnode.children[c]
+                root.parent = parentnode
+                parentnode.children.append(root)
+                print str(parentnode)
         
         # now we have a collection of subtrees which represent dependencies.
         # go through the collection's root nodes and wire them together into one tree
         head = None
         for node in nodes.values():
+            print "hi1:" + str(node)
             if node.parent is None:
                 if head is not None:
                     head.append(node)
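
The loop above climbs from childnode toward the root via parent links, and the new root assignment remembers the last node visited, i.e. the top of childnode's subtree, so the re-parenting branch can move the whole subtree under parentnode rather than just the child. That walk in isolation, with a hypothetical Node stand-in:

class Node(object):
    # Hypothetical stand-in with only the parent/children links the
    # walk relies on.
    def __init__(self, name):
        self.name = name
        self.parent = None
        self.children = []

def find_subtree_root(parentnode, childnode):
    # Climb from childnode via parent links. Returns (found, root):
    # found is True when parentnode is an ancestor of childnode; root
    # is the topmost node of childnode's chain, which gets hung under
    # parentnode when found is False.
    c = childnode
    root = childnode
    while c is not None and c is not parentnode:
        root = c
        c = c.parent
    return (c is not None), root
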
diff --git a/test/dependency.py b/test/dependency.py
index 9f550ab047cb23710d3023803b2f5aaff1a45c45..7d02c9dcebca3f5fdffc6776836d3d7cb73a968b 100644
--- a/test/dependency.py
+++ b/test/dependency.py
@@ -52,7 +52,41 @@ class DependencySortTest(PersistTest):
         head = util.DependencySorter(tuples, [node7]).sort()
         print "\n" + str(head)
 
+    def testsort3(self):
+        # case taken from: 'Mapper|Keyword|keywords,Mapper|IKAssociation|itemkeywords', 'Mapper|Item|items,Mapper|IKAssociation|itemkeywords'
+        node1 = thingy('keywords')
+        node2 = thingy('itemkeywords')
+        node3 = thingy('items')
+        tuples = [
+            (node1, node2),
+            (node3, node2)
+        ]
+        head1 = util.DependencySorter(tuples, [node1, node2, node3]).sort()
+        head2 = util.DependencySorter(tuples, [node3, node1, node2]).sort()
+        head3 = util.DependencySorter(tuples, [node3, node2, node1]).sort()
+        
+        # TODO: figure out a "node == node2" function
+        #self.assert_(str(head1) == str(head2) == str(head3))
+        print "\n" + str(head1)
+        print "\n" + str(head2)
+        print "\n" + str(head3)
 
+    def testsort4(self):
+        node1 = thingy('keywords')
+        node2 = thingy('itemkeywords')
+        node3 = thingy('items')
+        node4 = thingy('lala')
+        node5 = thingy('hoho')
+        
+        tuples = [
+            (node1, node2),
+            (node5, node3),
+            (node4, node2),
+            (node3, node2),
+            
+        ]
+        head = util.DependencySorter(tuples, []).sort()
+        print "\n" + str(head)
 
 if __name__ == "__main__":
     unittest.main()
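
For reference, the ordering these tests expect can be cross-checked with a plain topological sort over the same (parent, child) tuples. This is standard Kahn's algorithm, not the tree-based DependencySorter under test:

def toposort(tuples, allitems):
    # Collect every node mentioned, preserving input order.
    nodes = list(allitems)
    for parent, child in tuples:
        for n in (parent, child):
            if n not in nodes:
                nodes.append(n)
    # Count incoming edges per node.
    indegree = dict([(n, 0) for n in nodes])
    for parent, child in tuples:
        indegree[child] += 1
    # Repeatedly emit nodes with no remaining prerequisites.
    order = []
    queue = [n for n in nodes if indegree[n] == 0]
    while queue:
        n = queue.pop(0)
        order.append(n)
        for parent, child in tuples:
            if parent is n:
                indegree[child] -= 1
                if indegree[child] == 0:
                    queue.append(child)
    return order

# e.g. for testsort3's tuples, 'itemkeywords' always sorts after both
# 'keywords' and 'items', regardless of the input node order.
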
diff --git a/test/tables.py b/test/tables.py
index 8bddce1d0bc6a481363b0707b6148665cdb173b0..595195780860f77004918c2286ea5d6db0c3c736 100644
--- a/test/tables.py
+++ b/test/tables.py
@@ -10,8 +10,8 @@ __ALL__ = ['db', 'users', 'addresses', 'orders', 'orderitems', 'keywords', 'item
 
 ECHO = testbase.echo
 
-#DBTYPE = 'sqlite_memory'
-DBTYPE = 'postgres'
+DBTYPE = 'sqlite_memory'
+#DBTYPE = 'postgres'
 #DBTYPE = 'sqlite_file'
 
 if DBTYPE == 'sqlite_memory':