]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
mysql somewhat present
authorMike Bayer <mike_mp@zzzcomputing.com>
Sun, 27 Nov 2005 01:40:18 +0000 (01:40 +0000)
committerMike Bayer <mike_mp@zzzcomputing.com>
Sun, 27 Nov 2005 01:40:18 +0000 (01:40 +0000)
lib/sqlalchemy/ansisql.py
lib/sqlalchemy/databases/information_schema.py [new file with mode: 0644]
lib/sqlalchemy/databases/mysql.py [new file with mode: 0644]
lib/sqlalchemy/databases/oracle.py
lib/sqlalchemy/databases/postgres.py
lib/sqlalchemy/databases/sqlite.py
lib/sqlalchemy/schema.py
lib/sqlalchemy/sql.py
test/engines.py
test/mapper.py

index 109a7deb52519ca6c5c06df4c9b73dab5f6783e1..6a618e6cd4114c79caecde297657c4734ff92c29 100644 (file)
@@ -151,7 +151,10 @@ class ANSICompiler(sql.Compiled):
         if compound.operator is None:
             sep = " "
         else:
-            sep = " " + compound.operator + " "
+            if compound.spaces:
+                sep = compound.operator
+            else:
+                sep = " " + compound.operator + " "
         
         if compound.parens:
             self.strings[compound] = "(" + string.join([self.get_str(c) for c in compound.clauses], sep) + ")"
@@ -308,7 +311,7 @@ class ANSICompiler(sql.Compiled):
 
 class ANSISchemaGenerator(sqlalchemy.engine.SchemaIterator):
 
-    def get_column_specification(self, column, override_pk=False):
+    def get_column_specification(self, column, override_pk=False, first_pk=False):
         raise NotImplementedError()
         
     def visit_table(self, table):
@@ -318,11 +321,13 @@ class ANSISchemaGenerator(sqlalchemy.engine.SchemaIterator):
         
         # if only one primary key, specify it along with the column
         pks = table.primary_keys
+        first_pk = False
         for column in table.columns:
             self.append(separator)
             separator = ", \n"
-            self.append("\t" + self.get_column_specification(column, override_pk=len(pks)>1))
-        
+            self.append("\t" + self.get_column_specification(column, override_pk=len(pks)>1, first_pk=column.primary_key and not first_pk))
+            if column.primary_key:
+                first_pk = True
         # if multiple primary keys, specify it at the bottom
         if len(pks) > 1:
             self.append(", \n")
diff --git a/lib/sqlalchemy/databases/information_schema.py b/lib/sqlalchemy/databases/information_schema.py
new file mode 100644 (file)
index 0000000..51b9d94
--- /dev/null
@@ -0,0 +1,132 @@
+import sqlalchemy.sql as sql
+import sqlalchemy.engine as engine
+import sqlalchemy.schema as schema
+import sqlalchemy.ansisql as ansisql
+import sqlalchemy.types as sqltypes
+from sqlalchemy.ansisql import *
+
+generic_engine = ansisql.engine()
+gen_columns = schema.Table("columns", generic_engine,
+    Column("table_schema", String),
+    Column("table_name", String),
+    Column("column_name", String),
+    Column("is_nullable", Integer),
+    Column("data_type", String),
+    Column("ordinal_position", Integer),
+    Column("character_maximum_length", Integer),
+    Column("numeric_precision", Integer),
+    Column("numeric_scale", Integer),
+    schema="information_schema")
+    
+gen_constraints = schema.Table("table_constraints", generic_engine,
+    Column("table_schema", String),
+    Column("table_name", String),
+    Column("constraint_name", String),
+    Column("constraint_type", String),
+    schema="information_schema")
+
+gen_column_constraints = schema.Table("constraint_column_usage", generic_engine,
+    Column("table_schema", String),
+    Column("table_name", String),
+    Column("column_name", String),
+    Column("constraint_name", String),
+    schema="information_schema")
+
+gen_key_constraints = schema.Table("key_column_usage", generic_engine,
+    Column("table_schema", String),
+    Column("table_name", String),
+    Column("column_name", String),
+    Column("constraint_name", String),
+    schema="information_schema")
+
+
+def reflecttable(engine, table, ischema_names, use_mysql=False):
+    columns = gen_columns.toengine(engine)
+    constraints = gen_constraints.toengine(engine)
+    
+    if use_mysql:
+        # no idea which INFORMATION_SCHEMA spec is correct, mysql or postgres
+        key_constraints = schema.Table("key_column_usage", engine,
+            Column("table_schema", String),
+            Column("table_name", String),
+            Column("column_name", String),
+            Column("constraint_name", String),
+            Column("referenced_table_schema", String),
+            Column("referenced_table_name", String),
+            Column("referenced_column_name", String),
+            schema="information_schema", useexisting=True)
+    else:
+        column_constraints = gen_column_constraints.toengine(engine)
+        key_constraints = gen_key_constraints.toengine(engine)
+
+
+    if table.schema is not None:
+        current_schema = table.schema
+    else:
+        current_schema = engine.get_default_schema_name()
+    
+    s = select([columns], 
+        sql.and_(columns.c.table_name==table.name,
+        columns.c.table_schema==current_schema),
+        order_by=[columns.c.ordinal_position])
+        
+    c = s.execute()
+    while True:
+        row = c.fetchone()
+        if row is None:
+            break
+#        print "row! " + repr(row)
+ #       continue
+        (name, type, nullable, charlen, numericprec, numericscale) = (
+            row[columns.c.column_name], 
+            row[columns.c.data_type], 
+            row[columns.c.is_nullable] == 'YES', 
+            row[columns.c.character_maximum_length],
+            row[columns.c.numeric_precision],
+            row[columns.c.numeric_scale],
+            )
+
+        args = []
+        for a in (charlen, numericprec, numericscale):
+            if a is not None:
+                args.append(a)
+        coltype = ischema_names[type]
+        #print "coltype " + repr(coltype) + " args " +  repr(args)
+        coltype = coltype(*args)
+        table.append_item(schema.Column(name, coltype, nullable = nullable))
+
+    s = select([constraints.c.constraint_name, constraints.c.constraint_type, constraints.c.table_name, key_constraints], use_labels=True)
+    if not use_mysql:
+        s.append_column(column_constraints)
+        s.append_whereclause(key_constraints.c.constraint_name==column_constraints.c.constraint_name)
+        s.append_whereclause(column_constraints.c.constraint_name==constraints.c.constraint_name)
+        s.append_whereclause(constraints.c.table_name==table.name)
+        s.append_whereclause(constraints.c.table_schema==current_schema)
+        colmap = [constraints.c.constraint_type, key_constraints.c.column_name, column_constraints.c.table_schema, column_constraints.c.table_name, column_constraints.c.column_name]
+    else:
+        # this doesn't seem to pick up any foreign keys with mysql
+        s.append_whereclause(key_constraints.c.table_name==constraints.c.table_name)
+        s.append_whereclause(key_constraints.c.table_schema==constraints.c.table_schema)
+        s.append_whereclause(constraints.c.table_name==table.name)
+        s.append_whereclause(constraints.c.table_schema==current_schema)
+        colmap = [constraints.c.constraint_type, key_constraints.c.column_name, key_constraints.c.referenced_table_schema, key_constraints.c.referenced_table_name, key_constraints.c.referenced_column_name]
+    c = s.execute()
+
+    while True:
+        row = c.fetchone()
+        if row is None:
+            break
+#        continue
+        (type, constrained_column, referred_schema, referred_table, referred_column) = (
+            row[colmap[0]],
+            row[colmap[1]],
+            row[colmap[2]],
+            row[colmap[3]],
+            row[colmap[4]]
+        )
+        #print "type %s on column %s to remote %s.%s.%s" % (type, constrained_column, referred_schema, referred_table, referred_column) 
+        if type=='PRIMARY KEY':
+            table.c[constrained_column]._set_primary_key()
+        elif type=='FOREIGN KEY':
+            remotetable = Table(referred_table, engine, autoload = True, schema=referred_schema)
+            table.c[constrained_column].foreign_key = schema.ForeignKey(remotetable.c[referred_column])
diff --git a/lib/sqlalchemy/databases/mysql.py b/lib/sqlalchemy/databases/mysql.py
new file mode 100644 (file)
index 0000000..bf1b195
--- /dev/null
@@ -0,0 +1,186 @@
+# mysql.py
+# Copyright (C) 2005 Michael Bayer mike_mp@zzzcomputing.com
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+import sys, StringIO, string, types, re
+
+import sqlalchemy.sql as sql
+import sqlalchemy.engine as engine
+import sqlalchemy.schema as schema
+import sqlalchemy.ansisql as ansisql
+import sqlalchemy.types as sqltypes
+from sqlalchemy.ansisql import *
+import sqlalchemy.databases.information_schema as ischema
+
+try:
+    import MySQLdb as mysql
+except:
+    mysql = None
+    
+class MSNumeric(sqltypes.Numeric):
+    def get_col_spec(self):
+        return "NUMERIC(%(precision)s, %(length)s)" % {'precision': self.precision, 'length' : self.length}
+class MSInteger(sqltypes.Integer):
+    def get_col_spec(self):
+        return "INTEGER"
+class MSDateTime(sqltypes.DateTime):
+    def get_col_spec(self):
+        return "TIMESTAMP"
+class MSText(sqltypes.TEXT):
+    def get_col_spec(self):
+        return "TEXT"
+class MSString(sqltypes.String):
+    def get_col_spec(self):
+        return "VARCHAR(%(length)s)" % {'length' : self.length}
+class MSChar(sqltypes.CHAR):
+    def get_col_spec(self):
+        return "CHAR(%(length)s)" % {'length' : self.length}
+class MSBinary(sqltypes.Binary):
+    def get_col_spec(self):
+        return "BINARY"
+class MSBoolean(sqltypes.Boolean):
+    def get_col_spec(self):
+        return "BOOLEAN"
+        
+colspecs = {
+    sqltypes.Integer : MSInteger,
+    sqltypes.Numeric : MSNumeric,
+    sqltypes.DateTime : MSDateTime,
+    sqltypes.String : MSString,
+    sqltypes.Binary : MSBinary,
+    sqltypes.Boolean : MSBoolean,
+    sqltypes.TEXT : MSText,
+    sqltypes.CHAR: MSChar,
+}
+
+ischema_names = {
+    'int' : MSInteger,
+    'varchar' : MSString,
+    'char' : MSChar,
+    'text' : MSText,
+    'decimal' : MSNumeric,
+    'timestamp' : MSDateTime,
+    'binary' : MSBinary,
+}
+
+
+def engine(opts, **params):
+    return MySQLEngine(opts, **params)
+
+def descriptor():
+    return {'name':'mysql',
+    'description':'MySQL',
+    'arguments':[
+        ('user',"Database Username",None),
+        ('passwd',"Database Password",None),
+        ('db',"Database Name",None),
+        ('host',"Hostname", None),
+    ]}
+
+class MySQLEngine(ansisql.ANSISQLEngine):
+    def __init__(self, opts, module = None, **params):
+        if module is None:
+            self.module = mysql
+        self.opts = opts or {}
+        ansisql.ANSISQLEngine.__init__(self, **params)
+
+    def connect_args(self):
+        return [[], self.opts]
+
+    def type_descriptor(self, typeobj):
+        return sqltypes.adapt_type(typeobj, colspecs)
+
+    def last_inserted_ids(self):
+        return self.context.last_inserted_ids
+
+    def rowid_column_name(self):
+        """returns the ROWID column name for this engine."""
+        return "_rowid"
+
+    def tableimpl(self, table):
+        """returns a new sql.TableImpl object to correspond to the given Table object."""
+        return MySQLTableImpl(table)
+
+    def compiler(self, statement, bindparams, **kwargs):
+        return MySQLCompiler(self, statement, bindparams, **kwargs)
+
+    def schemagenerator(self, proxy, **params):
+        return MySQLSchemaGenerator(proxy, **params)
+
+    def get_default_schema_name(self):
+        if not hasattr(self, '_default_schema_name'):
+            self._default_schema_name = text("select database()", self).scalar()
+        return self._default_schema_name
+        
+    def last_inserted_ids(self):
+        return self.context.last_inserted_ids
+            
+    def post_exec(self, connection, cursor, statement, parameters, echo = None, compiled = None, **kwargs):
+        if compiled is None: return
+        if getattr(compiled, "isinsert", False):
+            self.context.last_inserted_ids = [cursor.lastrowid]
+
+    def _executemany(self, c, statement, parameters):
+        """we need accurate rowcounts for updates, inserts and deletes.  mysql is *also* not nice enough
+        to produce this correctly for an executemany, so we do our own executemany here."""
+        rowcount = 0
+        for param in parameters:
+            c.execute(statement, param)
+            rowcount += c.rowcount
+        self.context.rowcount = rowcount
+
+    def dbapi(self):
+        return self.module
+
+    def reflecttable(self, table):
+        ischema.reflecttable(self, table, ischema_names, use_mysql=True)
+
+class MySQLTableImpl(sql.TableImpl):
+    """attached to a schema.Table to provide it with a Selectable interface
+    as well as other functions
+    """
+
+#    def __init__(self, table):
+ #       self.table = table
+  #      self.id = self.table.name
+
+    def _rowid_col(self):
+        if getattr(self, '_mysql_rowid_column', None) is None:
+            if len(self.table.primary_keys) > 0:
+                self._mysql_rowid_column = self.table.primary_keys[0]
+            else:
+                self._mysql_rowid_column = self.table.columns[self.table.columns.keys()[0]]
+        return self._mysql_rowid_column
+    rowid_column = property(lambda s: s._rowid_col())
+
+class MySQLCompiler(ansisql.ANSICompiler):
+    pass
+        
+class MySQLSchemaGenerator(ansisql.ANSISchemaGenerator):
+    def get_column_specification(self, column, override_pk=False, first_pk=False):
+        colspec = column.name + " " + column.type.get_col_spec()
+
+        if not column.nullable:
+            colspec += " NOT NULL"
+        if column.primary_key:
+            if not override_pk:
+                colspec += " PRIMARY KEY"
+            if first_pk:
+                colspec += " AUTO_INCREMENT"
+        if column.foreign_key:
+            colspec += " REFERENCES %s(%s)" % (column.column.foreign_key.column.table.name, column.column.foreign_key.column.name) 
+        return colspec
+
index 3fa14b01b05d6f7b078bb0edad50e376cecfd5f8..f1cea4288d86a6c1b7431ded557cc7dba35dc9dd 100644 (file)
@@ -196,7 +196,7 @@ class OracleCompiler(ansisql.ANSICompiler):
         return ansisql.ANSICompiler.visit_insert(self, insert)
 
 class OracleSchemaGenerator(ansisql.ANSISchemaGenerator):
-    def get_column_specification(self, column, override_pk=False):
+    def get_column_specification(self, column, override_pk=False, **kwargs):
         colspec = column.name
         colspec += " " + column.type.get_col_spec()
 
index 0a41fb4c8e95daacc124a503f402591b6e72b314..a25700934edaaa12e0ffb8bafd989b01401b8ff3 100644 (file)
@@ -23,6 +23,8 @@ import sqlalchemy.schema as schema
 import sqlalchemy.ansisql as ansisql
 import sqlalchemy.types as sqltypes
 from sqlalchemy.ansisql import *
+import sqlalchemy.databases.information_schema as ischema
+
 try:
     import psycopg2 as psycopg
 except:
@@ -90,39 +92,6 @@ pg2_ischema_names = {
 pg1_ischema_names = pg2_ischema_names.copy()
 pg1_ischema_names['timestamp without time zone'] = PG1DateTime
 
-generic_engine = ansisql.engine()
-gen_columns = schema.Table("columns", generic_engine,
-    Column("table_schema", String),
-    Column("table_name", String),
-    Column("column_name", String),
-    Column("is_nullable", Integer),
-    Column("data_type", String),
-    Column("ordinal_position", Integer),
-    Column("character_maximum_length", Integer),
-    Column("numeric_precision", Integer),
-    Column("numeric_scale", Integer),
-    schema="information_schema")
-    
-gen_constraints = schema.Table("table_constraints", generic_engine,
-    Column("table_schema", String),
-    Column("table_name", String),
-    Column("constraint_name", String),
-    Column("constraint_type", String),
-    schema="information_schema")
-
-gen_column_constraints = schema.Table("constraint_column_usage", generic_engine,
-    Column("table_schema", String),
-    Column("table_name", String),
-    Column("column_name", String),
-    Column("constraint_name", String),
-    schema="information_schema")
-
-gen_key_constraints = schema.Table("key_column_usage", generic_engine,
-    Column("table_schema", String),
-    Column("table_name", String),
-    Column("column_name", String),
-    Column("constraint_name", String),
-    schema="information_schema")
 
 def engine(opts, **params):
     return PGSQLEngine(opts, **params)
@@ -174,9 +143,6 @@ class PGSQLEngine(ansisql.ANSISQLEngine):
     def schemadropper(self, proxy, **params):
         return PGSchemaDropper(proxy, **params)
         
-    def reflecttable(self, table):
-        raise "not implemented"
-
     def get_default_schema_name(self):
         if not hasattr(self, '_default_schema_name'):
             self._default_schema_name = text("select current_schema()", self).scalar()
@@ -248,76 +214,12 @@ class PGSQLEngine(ansisql.ANSISQLEngine):
         return self.module
 
     def reflecttable(self, table):
-        columns = gen_columns.toengine(self)
-        constraints = gen_constraints.toengine(self)
-        column_constraints = gen_column_constraints.toengine(self)
-        key_constraints = gen_key_constraints.toengine(self)
-
-        if table.schema is not None:
-            current_schema = table.schema
+        if self.version == 2:
+            ischema_names = pg2_ischema_names
         else:
-            current_schema = self.get_default_schema_name()
-        
-        s = select([columns], 
-            sql.and_(columns.c.table_name==table.name,
-            columns.c.table_schema==current_schema),
-            order_by=[columns.c.ordinal_position])
-            
-        c = s.execute()
-        while True:
-            row = c.fetchone()
-            if row is None:
-                break
-            #print "row! " + repr(row)
-            (name, type, nullable, charlen, numericprec, numericscale) = (
-                row[columns.c.column_name], 
-                row[columns.c.data_type], 
-                row[columns.c.is_nullable] == 'YES', 
-                row[columns.c.character_maximum_length],
-                row[columns.c.numeric_precision],
-                row[columns.c.numeric_scale],
-                )
-
-            args = []
-            for a in (charlen, numericprec, numericscale):
-                if a is not None:
-                    args.append(a)
-            if self.version == 2:
-                coltype = pg2_ischema_names[type]
-            else:
-                coltype = pg1_ischema_names[type]
-            #print "coltype " + repr(coltype) + " args " +  repr(args)
-            coltype = coltype(*args)
-            table.append_item(schema.Column(name, coltype, nullable = nullable))
+            ischema_names = pg1_ischema_names
 
-        s = select([
-            constraints.c.constraint_type,
-            column_constraints,
-            key_constraints
-            ], 
-            sql.and_(
-                key_constraints.c.constraint_name==column_constraints.c.constraint_name,
-                column_constraints.c.constraint_name==constraints.c.constraint_name,
-                constraints.c.table_name==table.name, constraints.c.table_schema==current_schema)
-        , use_labels=True)
-        c = s.execute()
-        while True:
-            row = c.fetchone()
-            if row is None:
-                break
-            (type, constrained_column, referred_schema, referred_table, referred_column) = (
-                row[constraints.c.constraint_type],
-                row[key_constraints.c.column_name],
-                row[column_constraints.c.table_schema],
-                row[column_constraints.c.table_name],
-                row[column_constraints.c.column_name]
-            )
-            #print "type %s on column %s to remote %s.%s.%s" % (type, constrained_column, referred_schema, referred_table, referred_column) 
-            if type=='PRIMARY KEY':
-                table.c[constrained_column]._set_primary_key()
-            elif type=='FOREIGN KEY':
-                remotetable = Table(referred_table, self, autoload = True, schema=referred_schema)
-                table.c[constrained_column].foreign_key = schema.ForeignKey(remotetable.c[referred_column])
+        ischema.reflecttable(self, table, ischema_names)
 
 class PGCompiler(ansisql.ANSICompiler):
     def visit_insert(self, insert):
@@ -331,7 +233,7 @@ class PGCompiler(ansisql.ANSICompiler):
         return ansisql.ANSICompiler.visit_insert(self, insert)
         
 class PGSchemaGenerator(ansisql.ANSISchemaGenerator):
-    def get_column_specification(self, column, override_pk=False):
+    def get_column_specification(self, column, override_pk=False, **kwargs):
         colspec = column.name
         if column.primary_key and isinstance(column.type, types.Integer) and (column.sequence is None or column.sequence.optional):
             colspec += " SERIAL"
index 8f6bedff697eb7ba131b4384ec4142567774da2e..9e945227ebd83fc535dccbe4c9cd6946961f6a51 100644 (file)
@@ -162,20 +162,16 @@ class SQLiteSQLEngine(ansisql.ANSISQLEngine):
         for idx in unique_indexes:
             c = self.execute("PRAGMA index_info(" + idx + ")", {})
             cols = []
-            includes_primary=False
             while True:
                 row = c.fetchone()
                 if row is None:
                     break
                 cols.append(row[2])
                 col = table.columns[row[2]]
-                if col.primary_key:
-                    includes_primary= True
-            if includes_primary:
-                # unique index that includes the pk is considered a multiple primary key
-                for col in cols:
-                    column = table.columns[col]
-                    table.columns[col]._set_primary_key()
+            # unique index that includes the pk is considered a multiple primary key
+            for col in cols:
+                column = table.columns[col]
+                table.columns[col]._set_primary_key()
                     
 class SQLiteCompiler(ansisql.ANSICompiler):
     def __init__(self, *args, **params):
@@ -183,7 +179,7 @@ class SQLiteCompiler(ansisql.ANSICompiler):
         ansisql.ANSICompiler.__init__(self, *args, **params)
 
 class SQLiteSchemaGenerator(ansisql.ANSISchemaGenerator):
-    def get_column_specification(self, column, override_pk=False):
+    def get_column_specification(self, column, override_pk=False, **kwargs):
         colspec = column.name + " " + column.type.get_col_spec()
         if not column.nullable:
             colspec += " NOT NULL"
@@ -193,20 +189,17 @@ class SQLiteSchemaGenerator(ansisql.ANSISchemaGenerator):
             colspec += " REFERENCES %s(%s)" % (column.foreign_key.column.table.name, column.foreign_key.column.name) 
         return colspec
     def visit_table(self, table):
-        """sqlite is going to create multi-primary keys as a single PK plus a UNIQUE index.  otherwise
-        its autoincrement functionality seems to get lost"""
+        """sqlite is going to create multi-primary keys with just a UNIQUE index."""
         self.append("\nCREATE TABLE " + table.fullname + "(")
 
         separator = "\n"
 
         have_pk = False
+        use_pks = len(table.primary_keys) == 1
         for column in table.columns:
             self.append(separator)
             separator = ", \n"
-            # specify PRIMARY KEY for just the first primary key
-            self.append("\t" + self.get_column_specification(column, override_pk=have_pk))
-            if column.primary_key:
-                have_pk = True
+            self.append("\t" + self.get_column_specification(column, override_pk=not use_pks))
                 
         if len(table.primary_keys) > 1:
             self.append(", \n")
index 30a2bceed1be406796606f715feb9349999fcd07..983ce70848651b538f77a6a6d93fdbbf3f0e6082 100644 (file)
@@ -63,13 +63,14 @@ class TableSingleton(type):
             autoload = kwargs.pop('autoload', False)
             redefine = kwargs.pop('redefine', False)
             mustexist = kwargs.pop('mustexist', False)
+            useexisting = kwargs.pop('useexisting', False)
             key = _get_table_key(engine, name, schema)
             table = engine.tables[key]
             if len(args):
                 if redefine:
                     table.reload_values(*args)
-                else:
-                    raise "Table '%s.%s' is already defined. specify 'redefine=True' to remap columns" % (schema, name)
+                elif not useexisting:
+                    raise "Table '%s.%s' is already defined. specify 'redefine=True' to remap columns, or 'useexisting=True' to use the existing table" % (schema, name)
             return table
         except KeyError:
             if mustexist:
index 85d87b10afe03b203a57c7e46c73cd8fb330f25d..54b604930a9d727a72d343c4666c0453a4658e74 100644 (file)
@@ -328,7 +328,7 @@ class CompareMixin(object):
         elif len(other) == 1 and not isinstance(other[0], Selectable):
             return self.__eq__(other[0])
         elif _is_literal(other[0]):
-            return self._compare('IN', CompoundClause(',', other))
+            return self._compare('IN', CompoundClause(',', spaces=False, parens=True, *other))
         else:
             return self._compare('IN', union(*other))
 
@@ -444,10 +444,11 @@ class Null(ClauseElement):
     
 class CompoundClause(ClauseElement):
     """represents a list of clauses joined by an operator"""
-    def __init__(self, operator, *clauses):
+    def __init__(self, operator, *clauses, **kwargs):
         self.operator = operator
         self.clauses = []
-        self.parens = False
+        self.parens = kwargs.pop('parens', False)
+        self.spaces = kwargs.pop('spaces', False)
         for c in clauses:
             if c is None: continue
             self.append(c)
@@ -458,7 +459,7 @@ class CompoundClause(ClauseElement):
         
     def append(self, clause):
         if _is_literal(clause):
-            clause = TextClause(str(clause))
+            clause = TextClause(repr(clause))
         elif isinstance(clause, CompoundClause):
             clause.parens = True
         self.clauses.append(clause)
@@ -682,9 +683,11 @@ class TableImpl(Selectable):
     def __init__(self, table):
         self.table = table
         self.id = self.table.name
-        self.rowid_column = schema.Column(self.table.engine.rowid_column_name(), types.Integer, hidden=True)
-        self.rowid_column._set_parent(table)
-        
+        self._rowid_column = schema.Column(self.table.engine.rowid_column_name(), types.Integer, hidden=True)
+        self._rowid_column._set_parent(table)
+    
+    rowid_column = property(lambda s: s._rowid_column)
+    
     def get_from_text(self):
         return self.table.name
     
index b5a89b35c7fc173b1b83fb2b086abd6da1808a90..317ad0e1f425c5cae16092b909455628691632c8 100644 (file)
@@ -80,9 +80,9 @@ class EngineTest(PersistTest):
             ]
         )
         table.create()
-        table.insert().execute({'multi_rev':1,'name':'row1', 'value':'value1'})
-        table.insert().execute({'multi_rev':18,'name':'row2', 'value':'value2'})
-        table.insert().execute({'multi_rev':3,'name':'row3', 'value':'value3'})
+        table.insert().execute({'multi_id':1,'multi_rev':1,'name':'row1', 'value':'value1'})
+        table.insert().execute({'multi_id':2,'multi_rev':18,'name':'row2', 'value':'value2'})
+        table.insert().execute({'multi_id':3,'multi_rev':3,'name':'row3', 'value':'value3'})
         table.select().execute().fetchall()
         table.drop()
         
index d8f84e7d651c9144db0261b7aaad59f1b6760190..51bb27c2044ef503c8a3d2f44f692d31199f044a 100644 (file)
@@ -183,7 +183,6 @@ class LazyTest(MapperSuperTest):
             {'item_id' : 4, 'keywords' : (Keyword, [])},
             {'item_id' : 5, 'keywords' : (Keyword, [])}
         )
-
         l = Item.mapper.select(and_(keywords.c.name == 'red', keywords.c.keyword_id == itemkeywords.c.keyword_id, Item.c.item_id==itemkeywords.c.item_id))
         self.assert_result(l, Item, 
             {'item_id' : 1, 'keywords' : (Keyword, [{'keyword_id' : 2}, {'keyword_id' : 4}, {'keyword_id' : 6}])},