git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
working on pyodbc / mxodbc
author: Mike Bayer <mike_mp@zzzcomputing.com>
Sat, 27 Feb 2010 20:03:33 +0000 (20:03 +0000)
committer: Mike Bayer <mike_mp@zzzcomputing.com>
Sat, 27 Feb 2010 20:03:33 +0000 (20:03 +0000)
15 files changed:
CHANGES
lib/sqlalchemy/connectors/mxodbc.py
lib/sqlalchemy/connectors/pyodbc.py
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mssql/mxodbc.py
lib/sqlalchemy/dialects/mssql/pyodbc.py
lib/sqlalchemy/engine/default.py
lib/sqlalchemy/sql/compiler.py
lib/sqlalchemy/test/requires.py
test/dialect/test_mssql.py
test/engine/test_reflection.py
test/sql/test_query.py
test/sql/test_returning.py
test/sql/test_rowcount.py
test/sql/test_types.py

diff --git a/CHANGES b/CHANGES
index df49116bc045ce8fad0381e7267113083ee4db29..7365f29a37de0a8d4c62549b3847bb03776ccb02 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -255,6 +255,10 @@ CHANGES
 - mssql
   - Re-established initial support for pymssql.
 
+  - Various fixes for implicit returning, reflection,
+    etc. - the MS-SQL dialects aren't quite complete
+    in 0.6 yet (but are close)
+  
   - Added basic support for mxODBC [ticket:1710].
   
   - Removed the text_as_varchar option.
index 93e323fb93c35379880c3693c2c4c49a40be7897..ef7852f61bc0e09045144cd6324823b55f0d1f9b 100644 (file)
@@ -1,5 +1,7 @@
 
 import sys
+import re
+
 from sqlalchemy.connectors import Connector
 
 class MxODBCConnector(Connector):
@@ -8,7 +10,8 @@ class MxODBCConnector(Connector):
     supports_sane_multi_rowcount = False
     supports_unicode_statements = False
     supports_unicode_binds = False
-
+    supports_native_decimal = False
+    
     @classmethod
     def dbapi(cls):
         platform = sys.platform
@@ -61,4 +64,14 @@ class MxODBCConnector(Connector):
         else:
             return False
 
-
+    def _get_server_version_info(self, connection):
+        dbapi_con = connection.connection
+        version = []
+        r = re.compile('[.\-]')
+        # 18 == pyodbc.SQL_DBMS_VER
+        for n in r.split(dbapi_con.getinfo(18)[1]):
+            try:
+                version.append(int(n))
+            except ValueError:
+                version.append(n)
+        return tuple(version)
index 46b0556d563d50999d5041af8216e34b070530cb..6abdbf0ddc11726aa2904cd8e5537c7242ef2e64 100644 (file)
@@ -12,6 +12,7 @@ class PyODBCConnector(Connector):
     # PyODBC unicode is broken on UCS-4 builds
     supports_unicode = sys.maxunicode == 65535
     supports_unicode_statements = supports_unicode
+    supports_native_decimal = True
     default_paramstyle = 'named'
     
     # for non-DSN connections, this should
index 1ce3cbde8c0bdeee596314ff182ceeffe6df4b11..dc767882b82286b459c362157e30b56f2c813f25 100644 (file)
@@ -279,13 +279,17 @@ RESERVED_WORDS = set(
 class _MSNumeric(sqltypes.Numeric):
     def result_processor(self, dialect, coltype):
         if self.asdecimal:
-            return processors.to_decimal_processor_factory(decimal.Decimal)
+            if getattr(self, 'scale', None) is not None and dialect.supports_native_decimal:
+                return None
+            else:
+                return processors.to_decimal_processor_factory(decimal.Decimal)
         else:
             #XXX: if the DBAPI returns a float (this is likely, given the
             # processor when asdecimal is True), this should be a None
             # processor instead.
             return processors.to_float
-
+            return None
+            
     def bind_processor(self, dialect):
         def process(value):
             if isinstance(value, decimal.Decimal):
@@ -797,14 +801,18 @@ class MSExecutionContext(default.DefaultExecutionContext):
             else:
                 self.cursor.execute("SELECT @@identity AS lastrowid")
             # fetchall() ensures the cursor is consumed without closing it
-            row = self.cursor.fetchall()[0]   
+            row = self.cursor.fetchall()[0]
             self._lastrowid = int(row[0])
 
         if (self.isinsert or self.isupdate or self.isdelete) and self.compiled.returning:
             self._result_proxy = base.FullyBufferedResultProxy(self)
             
         if self._enable_identity_insert:
-            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.format_table(self.compiled.statement.table))
+            self.cursor.execute(
+                        "SET IDENTITY_INSERT %s OFF" %  
+                                self.dialect.identifier_preparer.
+                                    format_table(self.compiled.statement.table)
+                        )
         
     def get_lastrowid(self):
         return self._lastrowid
@@ -1080,7 +1088,7 @@ class MSDialect(default.DefaultDialect):
                  query_timeout=None,
                  use_scope_identity=True,
                  max_identifier_length=None,
-                 schema_name="dbo", **opts):
+                 schema_name=u"dbo", **opts):
         self.query_timeout = int(query_timeout or 0)
         self.schema_name = schema_name
 
@@ -1099,7 +1107,8 @@ class MSDialect(default.DefaultDialect):
     
     def initialize(self, connection):
         super(MSDialect, self).initialize(connection)
-        if self.server_version_info >= MS_2005_VERSION and 'implicit_returning' not in self.__dict__:
+        if self.server_version_info >= MS_2005_VERSION and \
+                    'implicit_returning' not in self.__dict__:
             self.implicit_returning = True
         
     def _get_default_schema_name(self, connection):
@@ -1115,7 +1124,7 @@ class MSDialect(default.DefaultDialect):
             try:
                 default_schema_name = connection.scalar(query, [user_name])
                 if default_schema_name is not None:
-                    return default_schema_name
+                    return unicode(default_schema_name)
             except:
                 pass
         return self.schema_name
@@ -1282,34 +1291,34 @@ class MSDialect(default.DefaultDialect):
                                     name='%s_identity' % col_name)
                 break
         cursor.close()
-        if ic is not None:
-            try:
-                # is this table_fullname reliable?
-                table_fullname = "%s.%s" % (current_schema, tablename)
-                cursor = connection.execute(
-                    sql.text("select ident_seed(:seed), ident_incr(:incr)"), 
-                    {'seed':table_fullname, 'incr':table_fullname}
+
+        if ic is not None and self.server_version_info >= MS_2005_VERSION:
+            table_fullname = "%s.%s" % (current_schema, tablename)
+            cursor = connection.execute(
+                sql.text("select ident_seed(:tname), ident_incr(:tname)",
+                    bindparams=[
+                                    sql.bindparam('tname', table_fullname)
+                    ]
                 )
-                row = cursor.first()
-                if not row is None:
-                    colmap[ic]['sequence'].update({
-                        'start' : int(row[0]),
-                        'increment' : int(row[1])
-                    })
-            except:
-                # ignoring it, works just like before
-                pass
+            )
+            row = cursor.first()
+            if not row is None:
+                colmap[ic]['sequence'].update({
+                    'start' : int(row[0]),
+                    'increment' : int(row[1])
+                })
         return cols
 
     @reflection.cache
     def get_primary_keys(self, connection, tablename, schema=None, **kw):
         current_schema = schema or self.default_schema_name
         pkeys = []
-        # Add constraints
-        RR = ischema.ref_constraints    #information_schema.referential_constraints
-        TC = ischema.constraints        #information_schema.table_constraints
-        C  = ischema.key_constraints.alias('C') #information_schema.constraint_column_usage: the constrained column
-        R  = ischema.key_constraints.alias('R') #information_schema.constraint_column_usage: the referenced column
+        RR = ischema.ref_constraints    # information_schema.referential_constraints
+        TC = ischema.constraints        # information_schema.table_constraints
+        C  = ischema.key_constraints.alias('C') # information_schema.constraint_column_usage: 
+                                                # the constrained column
+        R  = ischema.key_constraints.alias('R') # information_schema.constraint_column_usage: 
+                                                # the referenced column
 
         # Primary key constraints
         s = sql.select([C.c.column_name, TC.c.constraint_type],
@@ -1329,13 +1338,16 @@ class MSDialect(default.DefaultDialect):
         # Add constraints
         RR = ischema.ref_constraints    #information_schema.referential_constraints
         TC = ischema.constraints        #information_schema.table_constraints
-        C  = ischema.key_constraints.alias('C') #information_schema.constraint_column_usage: the constrained column
-        R  = ischema.key_constraints.alias('R') #information_schema.constraint_column_usage: the referenced column
+        C  = ischema.key_constraints.alias('C') # information_schema.constraint_column_usage: 
+                                                # the constrained column
+        R  = ischema.key_constraints.alias('R') # information_schema.constraint_column_usage: 
+                                                # the referenced column
 
         # Foreign key constraints
         s = sql.select([C.c.column_name,
                         R.c.table_schema, R.c.table_name, R.c.column_name,
-                        RR.c.constraint_name, RR.c.match_option, RR.c.update_rule, RR.c.delete_rule],
+                        RR.c.constraint_name, RR.c.match_option, RR.c.update_rule,
+                        RR.c.delete_rule],
                        sql.and_(C.c.table_name == tablename,
                                 C.c.table_schema == current_schema,
                                 C.c.constraint_name == RR.c.constraint_name,
@@ -1378,6 +1390,3 @@ class MSDialect(default.DefaultDialect):
 
         return fkeys.values()
 
-
-# fixme.  I added this for the tests to run. -Randall
-MSSQLDialect = MSDialect
index bbaccd328971375be0afb257165512d93b9a812e..38d559e2ba819e90f4d5df36ef19e5212db36790 100644 (file)
@@ -10,7 +10,8 @@ from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc
 MSExecutionContext_mxodbc = MSExecutionContext_pyodbc
 
 class MSDialect_mxodbc(MxODBCConnector, MSDialect):
-    supports_sane_rowcount = True
+    # FIXME: yikes, plain rowcount doesn't work ?
+    supports_sane_rowcount = False #True
     supports_sane_multi_rowcount = False
 
     execution_ctx_cls = MSExecutionContext_mxodbc
index 9a2a9e4e782c7c4e3446b82654016a6f8a6a49a5..23ab0320cb0d8bd06943073c7a3673eb8458e4f2 100644 (file)
@@ -35,7 +35,8 @@ class MSExecutionContext_pyodbc(MSExecutionContext):
             # We may have to skip over a number of result sets with no data (due to triggers, etc.)
             while True:
                 try:
-                    # fetchall() ensures the cursor is consumed without closing it (FreeTDS particularly)
+                    # fetchall() ensures the cursor is consumed 
+                    # without closing it (FreeTDS particularly)
                     row = self.cursor.fetchall()[0]  
                     break
                 except self.dialect.dbapi.Error, e:
index 74562a70e3f8944516fabe9d9db051e7592141e8..ac933bdf423e3cb14491f99cde088fc86ed0040c 100644 (file)
@@ -39,6 +39,11 @@ class DefaultDialect(base.Dialect):
     supports_native_enum = False
     supports_native_boolean = False
     
+    # if the NUMERIC type
+    # returns decimal.Decimal.
+    # *not* the FLOAT type however.
+    supports_native_decimal = False
+    
     # Py3K
     #supports_unicode_statements = True
     #supports_unicode_binds = True
index 32aa2a9920674ae795426a23048e467420549bb1..60f74e923f04fa1d12d431f0db4b730a738e2f85 100644 (file)
@@ -852,6 +852,7 @@ class SQLCompiler(engine.Compiled):
                 if c.primary_key and \
                     need_pks and \
                     (
+                        implicit_returning or 
                         not postfetch_lastrowid or 
                         c is not stmt.table._autoincrement_column
                     ):
index c97e6f5bbbedcbcf1629be701b2d777bc6b803eb..0bf4689dfe49d59ddbadbb3bcbbe83777bb47363 100644 (file)
@@ -190,6 +190,12 @@ def unicode_ddl(fn):
         exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'),
         )
 
+def sane_rowcount(fn):
+    return _chain_decorators_on(
+        fn,
+        skip_if(lambda: not testing.db.dialect.supports_sane_rowcount)
+    )
+    
 def python2(fn):
     return _chain_decorators_on(
         fn,
index aa5ecf8cdcfe67b2714635510c6ee0e2ee5105ee..caf71ab1031cd5ad12ed9958fb033b35a5f8c0cf 100644 (file)
@@ -214,7 +214,7 @@ class CompileTest(TestBase, AssertsCompiledSQL):
 
 class IdentityInsertTest(TestBase, AssertsCompiledSQL):
     __only_on__ = 'mssql'
-    __dialect__ = mssql.MSSQLDialect()
+    __dialect__ = mssql.MSDialect()
 
     @classmethod
     def setup_class(cls):
@@ -322,7 +322,8 @@ class ReflectionTest(TestBase, ComparesTables):
         meta2 = MetaData(testing.db)
         try:
             table2 = Table('identity_test', meta2, autoload=True)
-            sequence = isinstance(table2.c['col1'].default, schema.Sequence) and table2.c['col1'].default
+            sequence = isinstance(table2.c['col1'].default, schema.Sequence) \
+                                    and table2.c['col1'].default
             assert sequence.start == 2
             assert sequence.increment == 3
         finally:
@@ -704,7 +705,8 @@ class TypesTest(TestBase, AssertsExecutionResults, ComparesTables):
                       '0.0000000000000000002', '0.2', '-0.0000000000000000002', '-2E-2',
                       '156666.458923543', '-156666.458923543', '1', '-1', '-1234', '1234',
                       '2E-12', '4E8', '3E-6', '3E-7', '4.1', '1E-1', '1E-2', '1E-3',
-                      '1E-4', '1E-5', '1E-6', '1E-7', '1E-1', '1E-8', '0.2732E2', '-0.2432E2', '4.35656E2',
+                      '1E-4', '1E-5', '1E-6', '1E-7', '1E-1', '1E-8', '0.2732E2', 
+                      '-0.2432E2', '4.35656E2',
                       '-02452E-2', '45125E-2',
                       '1234.58965E-2', '1.521E+15', '-1E-25', '1E-25', '1254E-25', '-1203E-25',
                       '0', '-0.00', '-0', '4585E12', '000000000000000000012', '000000000000.32E12',
@@ -714,7 +716,7 @@ class TypesTest(TestBase, AssertsExecutionResults, ComparesTables):
             numeric_table.insert().execute(numericcol=value)
 
         for value in select([numeric_table.c.numericcol]).execute():
-            assert value[0] in test_items, "%s not in test_items" % value[0]
+            assert value[0] in test_items, "%r not in test_items" % value[0]
 
     def test_float(self):
         float_table = Table('float_table', metadata,
@@ -1071,16 +1073,17 @@ class TypesTest(TestBase, AssertsExecutionResults, ComparesTables):
         testing.eq_(gen.get_column_specification(t.c.t), "t %s" % expected)
         self.assert_(repr(t.c.t))
         t.create(checkfirst=True)
-        
+    
+    @testing.crashes("+mxodbc", "mxODBC doesn't do scope_identity() with DEFAULT VALUES")
     def test_autoincrement(self):
         Table('ai_1', metadata,
                Column('int_y', Integer, primary_key=True),
                Column('int_n', Integer, DefaultClause('0'),
-                      primary_key=True))
+                      primary_key=True, autoincrement=False))
         Table('ai_2', metadata,
                Column('int_y', Integer, primary_key=True),
                Column('int_n', Integer, DefaultClause('0'),
-                      primary_key=True))
+                      primary_key=True, autoincrement=False))
         Table('ai_3', metadata,
                Column('int_n', Integer, DefaultClause('0'),
                       primary_key=True, autoincrement=False),
@@ -1117,11 +1120,14 @@ class TypesTest(TestBase, AssertsExecutionResults, ComparesTables):
 
         for name in table_names:
             tbl = Table(name, mr, autoload=True)
+            tbl = metadata.tables[name]
             for c in tbl.c:
                 if c.name.startswith('int_y'):
-                    assert c.autoincrement
+                    assert c.autoincrement, name
+                    assert tbl._autoincrement_column is c, name
                 elif c.name.startswith('int_n'):
-                    assert not c.autoincrement
+                    assert not c.autoincrement, name
+                    assert tbl._autoincrement_column is not c, name
             
             for counter, engine in enumerate([
                 engines.testing_engine(options={'implicit_returning':False}),
index 5d3f0ca86c8e6299152db33761932f23107c1691..bf94cce657d345cb5fb9e0ff290ac95bc8d1a8ee 100644 (file)
@@ -804,12 +804,13 @@ class UnicodeReflectionTest(TestBase):
             metadata = MetaData(bind)
 
             if testing.against('sybase', 'maxdb', 'oracle', 'mssql'):
-                names = set(['plain'])
+                names = set([u'plain'])
             else:
                 names = set([u'plain', u'Unit\u00e9ble', u'\u6e2c\u8a66'])
 
             for name in names:
-                Table(name, metadata, Column('id', sa.Integer, sa.Sequence(name + "_id_seq"), primary_key=True))
+                Table(name, metadata, Column('id', sa.Integer, sa.Sequence(name + "_id_seq"),
+                                        primary_key=True))
             metadata.create_all()
 
             reflected = set(bind.table_names())
index d7bca1af49a1885b0a1539ad94575627c4d18bb6..62da772a4c855b442336cabe04331748a8d22215 100644 (file)
@@ -78,6 +78,13 @@ class QueryTest(TestBase):
             detects rows that had defaults and post-fetches.
             """
 
+            # verify implicit_returning is working
+            if engine.dialect.implicit_returning:
+                ins = table.insert()
+                comp = ins.compile(engine, column_keys=list(values))
+                if not set(values).issuperset(c.key for c in table.primary_key):
+                    assert comp.returning
+            
             result = engine.execute(table.insert(), **values)
             ret = values.copy()
             
@@ -85,13 +92,17 @@ class QueryTest(TestBase):
                 ret[col.key] = id
 
             if result.lastrow_has_defaults():
-                criterion = and_(*[col==id for col, id in zip(table.primary_key, result.inserted_primary_key)])
+                criterion = and_(*[col==id for col, id in 
+                                    zip(table.primary_key, result.inserted_primary_key)])
                 row = engine.execute(table.select(criterion)).first()
                 for c in table.c:
                     ret[c.key] = row[c]
             return ret
 
         if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
+            assert testing.db.dialect.implicit_returning
+            
+        if testing.db.dialect.implicit_returning:
             test_engines = [
                 engines.testing_engine(options={'implicit_returning':False}),
                 engines.testing_engine(options={'implicit_returning':True}),
index a36ce3cd88bd526947a0c75d002e3c93f3a2f7e5..481eba8253acbf7a4cdd22c50b1105722778ba51 100644 (file)
@@ -95,6 +95,7 @@ class ReturningTest(TestBase, AssertsExecutionResults):
 
         @testing.fails_on('postgresql', '')
         @testing.fails_on('oracle', '')
+        @testing.crashes('mssql+mxodbc', 'Raises an error')
         def test_executemany():
             # return value is documented as failing with psycopg2/executemany
             result2 = table.insert().returning(table).execute(
@@ -112,8 +113,6 @@ class ReturningTest(TestBase, AssertsExecutionResults):
 
         test_executemany()
 
-        result3 = table.insert().returning(table.c.id).execute({'persons': 4, 'full': False})
-        eq_([dict(row) for row in result3], [{'id': 4}])
     
         
     @testing.exclude('firebird', '<', (2, 1), '2.1+ feature')
index 82301a4a5c84d068b8a9a3e31648ef49fe32c0d6..6da25b9145cfb36b8a6a408da4e2adde6b548c10 100644 (file)
@@ -4,6 +4,9 @@ from sqlalchemy.test import *
 
 class FoundRowsTest(TestBase, AssertsExecutionResults):
     """tests rowcount functionality"""
+    
+    __requires__ = ('sane_rowcount', )
+    
     @classmethod
     def setup_class(cls):
         metadata = MetaData(testing.db)
@@ -11,7 +14,9 @@ class FoundRowsTest(TestBase, AssertsExecutionResults):
         global employees_table
 
         employees_table = Table('employees', metadata,
-            Column('employee_id', Integer, Sequence('employee_id_seq', optional=True), primary_key=True),
+            Column('employee_id', Integer, 
+                        Sequence('employee_id_seq', optional=True), 
+                        primary_key=True),
             Column('name', String(50)),
             Column('department', String(1)),
         )
index 53f4d8d919e85f729e0df796825ae300ffea4ea6..29b337eda5ebb995a1480588fd40766517118ced 100644 (file)
@@ -112,7 +112,7 @@ class PickleMetadataTest(TestBase):
 class UserDefinedTest(TestBase):
     """tests user-defined types."""
 
-    def testprocessing(self):
+    def test_processing(self):
 
         global users
         users.insert().execute(
@@ -132,7 +132,7 @@ class UserDefinedTest(TestBase):
             [1800, 2250, 1350],
             l
         ):
-            for col in row[1:5]:
+            for col in list(row)[1:5]:
                 eq_(col, assertstr)
             eq_(row[5], assertint)
             eq_(row[6], assertint2)
@@ -1113,7 +1113,7 @@ class BooleanTest(TestBase, AssertsExecutionResults):
         global bool_table
         metadata = MetaData(testing.db)
         bool_table = Table('booltest', metadata,
-            Column('id', Integer, primary_key=True),
+            Column('id', Integer, primary_key=True, autoincrement=False),
             Column('value', Boolean),
             Column('unconstrained_value', Boolean(create_constraint=False)),
             )
@@ -1156,6 +1156,8 @@ class BooleanTest(TestBase, AssertsExecutionResults):
     
     @testing.fails_on('mysql', 
             "The CHECK clause is parsed but ignored by all storage engines.")
+    @testing.fails_on('mssql', 
+            "FIXME: MS-SQL 2005 doesn't honor CHECK ?!?")
     @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
     def test_constraint(self):
         assert_raises((exc.IntegrityError, exc.ProgrammingError),