]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
Add SQL Server CI coverage
authorMike Bayer <mike_mp@zzzcomputing.com>
Tue, 29 Aug 2017 16:36:54 +0000 (12:36 -0400)
committerMike Bayer <mike_mp@zzzcomputing.com>
Thu, 31 Aug 2017 21:20:26 +0000 (17:20 -0400)
Change-Id: Ida0d01ae9bcc0573b86e24fddea620a38c962822

36 files changed:
doc/build/changelog/unreleased_12/pymssql_sane_rowcount.rst [new file with mode: 0644]
lib/sqlalchemy/connectors/pyodbc.py
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mssql/information_schema.py
lib/sqlalchemy/dialects/mssql/pymssql.py
lib/sqlalchemy/dialects/mssql/pyodbc.py
lib/sqlalchemy/dialects/mysql/base.py
lib/sqlalchemy/testing/provision.py
lib/sqlalchemy/testing/requirements.py
lib/sqlalchemy/testing/suite/test_select.py
reap_dbs.py [moved from reap_oracle_dbs.py with 59% similarity]
test/aaa_profiling/test_memusage.py
test/dialect/mssql/test_query.py
test/dialect/mssql/test_reflection.py
test/dialect/mssql/test_types.py
test/engine/test_execute.py
test/orm/inheritance/test_assorted_poly.py
test/orm/inheritance/test_basic.py
test/orm/inheritance/test_polymorphic_rel.py
test/orm/test_assorted_eager.py
test/orm/test_composites.py
test/orm/test_froms.py
test/orm/test_hasparent.py
test/orm/test_manytomany.py
test/orm/test_query.py
test/orm/test_transaction.py
test/orm/test_unitofwork.py
test/orm/test_unitofworkv2.py
test/orm/test_versioning.py
test/requirements.py
test/sql/test_defaults.py
test/sql/test_insert_exec.py
test/sql/test_query.py
test/sql/test_resultset.py
test/sql/test_types.py
tox.ini

diff --git a/doc/build/changelog/unreleased_12/pymssql_sane_rowcount.rst b/doc/build/changelog/unreleased_12/pymssql_sane_rowcount.rst
new file mode 100644 (file)
index 0000000..9b4df2d
--- /dev/null
@@ -0,0 +1,8 @@
+.. change::
+    :tags: bug, mssql, orm
+
+    Enabled the "sane_rowcount" flag for the pymssql dialect, indicating
+    that the DBAPI now reports the correct number of rows affected from
+    an UPDATE or DELETE statement.  This impacts mostly the ORM versioning
+    feature in that it now can verify the number of rows affected on a
+    target version.
\ No newline at end of file
index 66acf007252a476f6276c2f5a93d5ed0f9790f64..b95b2964f4bbd2fdaaa7bfc52a1dd3c87c36eef9 100644 (file)
@@ -9,7 +9,6 @@ from . import Connector
 from .. import util
 
 
-import sys
 import re
 
 
@@ -19,10 +18,8 @@ class PyODBCConnector(Connector):
     supports_sane_rowcount_returning = False
     supports_sane_multi_rowcount = False
 
-    if util.py2k:
-        # PyODBC unicode is broken on UCS-4 builds
-        supports_unicode = sys.maxunicode == 65535
-        supports_unicode_statements = supports_unicode
+    supports_unicode_statements = True
+    supports_unicode_binds = True
 
     supports_native_decimal = True
     default_paramstyle = 'named'
@@ -31,21 +28,10 @@ class PyODBCConnector(Connector):
     # hold the desired driver name
     pyodbc_driver_name = None
 
-    # will be set to True after initialize()
-    # if the freetds.so is detected
-    freetds = False
-
-    # will be set to the string version of
-    # the FreeTDS driver if freetds is detected
-    freetds_driver_version = None
-
-    # will be set to True after initialize()
-    # if the libessqlsrv.so is detected
-    easysoft = False
-
     def __init__(self, supports_unicode_binds=None, **kw):
         super(PyODBCConnector, self).__init__(**kw)
-        self._user_supports_unicode_binds = supports_unicode_binds
+        if supports_unicode_binds is not None:
+            self.supports_unicode_binds = supports_unicode_binds
 
     @classmethod
     def dbapi(cls):
@@ -130,40 +116,8 @@ class PyODBCConnector(Connector):
         else:
             return False
 
-    def initialize(self, connection):
-        # determine FreeTDS first.   can't issue SQL easily
-        # without getting unicode_statements/binds set up.
-
-        pyodbc = self.dbapi
-
-        dbapi_con = connection.connection
-
-        _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
-        self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name
-                                     ))
-        self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name
-                                      ))
-
-        if self.freetds:
-            self.freetds_driver_version = dbapi_con.getinfo(
-                pyodbc.SQL_DRIVER_VER)
-
-        self.supports_unicode_statements = (
-            not util.py2k or
-            (not self.freetds and not self.easysoft)
-        )
-
-        if self._user_supports_unicode_binds is not None:
-            self.supports_unicode_binds = self._user_supports_unicode_binds
-        elif util.py2k:
-            self.supports_unicode_binds = (
-                not self.freetds or self.freetds_driver_version >= '0.91'
-            ) and not self.easysoft
-        else:
-            self.supports_unicode_binds = True
-
-        # run other initialization which asks for user name, etc.
-        super(PyODBCConnector, self).initialize(connection)
+    # def initialize(self, connection):
+    #   super(PyODBCConnector, self).initialize(connection)
 
     def _dbapi_version(self):
         if not self.dbapi:
index 84be8d0e3c8c39a855bb8f4801e4d33cabe9d2cc..5f936fd765f6759523e72fb75ef4b4200afad0e0 100644 (file)
@@ -560,17 +560,20 @@ This option can also be specified engine-wide using the
 Rowcount Support / ORM Versioning
 ---------------------------------
 
-The SQL Server drivers have very limited ability to return the number
-of rows updated from an UPDATE or DELETE statement.  In particular, the
-pymssql driver has no support, whereas the pyodbc driver can only return
-this value under certain conditions.
-
-In particular, updated rowcount is not available when OUTPUT INSERTED
-is used.  This impacts the SQLAlchemy ORM's versioning feature when
-server-side versioning schemes are used.  When
-using pyodbc, the "implicit_returning" flag needs to be set to false
-for any ORM mapped class that uses a version_id column in conjunction with
-a server-side version generator::
+The SQL Server drivers may have limited ability to return the number
+of rows updated from an UPDATE or DELETE statement.
+
+As of this writing, the PyODBC driver is not able to return a rowcount when
+OUTPUT INSERTED is used.  This impacts the SQLAlchemy ORM's versioning feature
+in many cases where server-side value generators are in use in that while the
+versioning operations can succeed, the ORM cannot always check that an UPDATE
+or DELETE statement matched the number of rows expected, which is how it
+verifies that the version identifier matched.   When this condition occurs, a
+warning will be emitted but the operation will proceed.
+
+The use of OUTPUT INSERTED can be disabled by setting the
+:paramref:`.Table.implicit_returning` flag to ``False`` on a particular
+:class:`.Table`, which in declarative looks like::
 
     class MyTable(Base):
         __tablename__ = 'mytable'
@@ -585,14 +588,10 @@ a server-side version generator::
             'implicit_returning': False
         }
 
-Without the implicit_returning flag above, the UPDATE statement will
-use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
--1, causing the versioning logic to fail.
-
 Enabling Snapshot Isolation
 ---------------------------
 
-Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
+SQL Server has a default transaction
 isolation mode that locks entire tables, and causes even mildly concurrent
 applications to have long held locks and frequent deadlocks.
 Enabling snapshot isolation for the database as a whole is recommended
@@ -606,12 +605,6 @@ following ALTER DATABASE commands executed at the SQL prompt::
 Background on SQL Server snapshot isolation is available at
 http://msdn.microsoft.com/en-us/library/ms175095.aspx.
 
-Known Issues
-------------
-
-* No support for more than one ``IDENTITY`` column per table
-* reflection of indexes does not work with versions older than
-  SQL Server 2005
 
 """
 import datetime
index 625479be7412d4c4afa778d11c8e7915e52f8a3b..a73dbdfad773908d49dcb242478db697c964f4fa 100644 (file)
@@ -38,7 +38,8 @@ class _cast_on_2005(expression.ColumnElement):
 @compiles(_cast_on_2005)
 def _compile(element, compiler, **kw):
     from . import base
-    if compiler.dialect.server_version_info < base.MS_2005_VERSION:
+    if compiler.dialect.server_version_info is None or \
+             compiler.dialect.server_version_info < base.MS_2005_VERSION:
         return compiler.process(element.bindvalue, **kw)
     else:
         return compiler.process(cast(element.bindvalue, Unicode), **kw)
index 51237990e90dc581a283f55067168ab80e8f8c5b..d9a2d59d08433fcb696c5445bd8715deb3da204b 100644 (file)
@@ -17,6 +17,9 @@ pymssql is a Python module that provides a Python DBAPI interface around
 `FreeTDS <http://www.freetds.org/>`_.  Compatible builds are available for
 Linux, MacOSX and Windows platforms.
 
+Modern versions of this driver work very well with SQL Server and
+FreeTDS from Linux and are highly recommended.
+
 """
 from .base import MSDialect, MSIdentifierPreparer
 from ... import types as sqltypes, util, processors
@@ -41,7 +44,7 @@ class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
 
 
 class MSDialect_pymssql(MSDialect):
-    supports_sane_rowcount = False
+    supports_native_decimal = True
     driver = 'pymssql'
 
     preparer = MSIdentifierPreparer_pymssql
@@ -68,10 +71,6 @@ class MSDialect_pymssql(MSDialect):
                       "the 1.0 series of the pymssql DBAPI.")
         return module
 
-    def __init__(self, **params):
-        super(MSDialect_pymssql, self).__init__(**params)
-        self.use_scope_identity = True
-
     def _get_server_version_info(self, connection):
         vers = connection.scalar("select @@version")
         m = re.match(
@@ -111,6 +110,7 @@ class MSDialect_pymssql(MSDialect):
         else:
             connection.autocommit(False)
             super(MSDialect_pymssql, self).set_isolation_level(connection,
-                                                                level)
+                                                               level)
+
 
 dialect = MSDialect_pymssql
index c6368f9696fd724523ea009d90de8fad18df1a74..a667b671e91a7dbfbdd5c66ac932e33d219d9ab0 100644 (file)
@@ -64,34 +64,19 @@ as illustrated below using ``urllib.quote_plus``::
     engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
 
 
-Unicode Binds
--------------
-
-The current state of PyODBC on a unix backend with FreeTDS and/or
-EasySoft is poor regarding unicode; different OS platforms and versions of
-UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
-dramatically alter how strings are received.  The PyODBC dialect attempts to
-use all the information it knows to determine whether or not a Python unicode
-literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
-can encode these to bytestrings first, some users have reported that PyODBC
-mis-handles bytestrings for certain encodings and requires a Python unicode
-object, while the author has observed widespread cases where a Python unicode
-is completely misinterpreted by PyODBC, particularly when dealing with
-the information schema tables used in table reflection, and the value
-must first be encoded to a bytestring.
-
-It is for this reason that whether or not unicode literals for bound
-parameters be sent to PyODBC can be controlled using the
-``supports_unicode_binds`` parameter to ``create_engine()``.  When
-left at its default of ``None``, the PyODBC dialect will use its
-best guess as to whether or not the driver deals with unicode literals
-well.  When ``False``, unicode literals will be encoded first, and when
-``True`` unicode literals will be passed straight through.  This is an interim
-flag that hopefully should not be needed when the unicode situation stabilizes
-for unix + PyODBC.
-
-.. versionadded:: 0.7.7
-    ``supports_unicode_binds`` parameter to ``create_engine()``\ .
+Driver / Unicode Support
+-------------------------
+
+PyODBC works best with Microsoft ODBC drivers, particularly in the area
+of Unicode support on both Python 2 and Python 3.
+
+Using the FreeTDS ODBC drivers on Linux or OSX with PyODBC is **not**
+recommended; there have been historically many Unicode-related issues
+in this area, including before Microsoft offered ODBC drivers for Linux
+and OSX.   Now that Microsoft offers drivers for all platforms, these
+are recommended for PyODBC support.  FreeTDS remains relevant for
+non-ODBC drivers such as pymssql where it works very well.
+
 
 Rowcount Support
 ----------------
@@ -272,11 +257,12 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
 
     def _get_server_version_info(self, connection):
         try:
-            raw = connection.scalar("SELECT  SERVERPROPERTY('ProductVersion')")
+            raw = connection.scalar(
+                "SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)")
         except exc.DBAPIError:
             # SQL Server docs indicate this function isn't present prior to
-            # 2008; additionally, unknown combinations of pyodbc aren't
-            # able to run this query.
+            # 2008.  Before we had the VARCHAR cast above, pyodbc would also
+            # fail on this query.
             return super(MSDialect_pyodbc, self).\
                 _get_server_version_info(connection)
         else:
index 9d6dd7188237e28c17df4a6f12a17bfdc2288ac6..5b01b2c1ffca8175cff37c0501a1bd7b4fb33946 100644 (file)
@@ -1871,6 +1871,13 @@ class MySQLDialect(default.DefaultDialect):
     def _is_mariadb(self):
         return 'MariaDB' in self.server_version_info
 
+    @property
+    def _mariadb_normalized_version_info(self):
+        if len(self.server_version_info) > 5:
+            return self.server_version_info[3:]
+        else:
+            return self.server_version_info
+
     @property
     def _supports_cast(self):
         return self.server_version_info is None or \
index 033537156e9e62a8a3fae36cee5bc66a1e4c5275..17ddbb567e89f045bbfc18712d9c53a875ae5b32 100644 (file)
@@ -287,49 +287,61 @@ def _oracle_update_db_opts(db_url, db_opts):
     db_opts['_retry_on_12516'] = True
 
 
-def reap_oracle_dbs(idents_file):
-    log.info("Reaping Oracle dbs...")
+def reap_dbs(idents_file):
+    log.info("Reaping databases...")
+
+    urls = collections.defaultdict(set)
+    idents = collections.defaultdict(set)
 
-    urls = collections.defaultdict(list)
     with open(idents_file) as file_:
         for line in file_:
             line = line.strip()
             db_name, db_url = line.split(" ")
-            urls[db_url].append(db_name)
-
-    for url in urls:
-        if not url.startswith("oracle"):
-            continue
-        idents = urls[url]
-        log.info("db reaper connecting to %r", url)
-        eng = create_engine(url)
-        with eng.connect() as conn:
-
-            log.info("identifiers in file: %s", ", ".join(idents))
-
-            to_reap = conn.execute(
-                "select u.username from all_users u where username "
-                "like 'TEST_%' and not exists (select username "
-                "from v$session where username=u.username)")
-            all_names = {username.lower() for (username, ) in to_reap}
-            to_drop = set()
-            for name in all_names:
-                if name.endswith("_ts1") or name.endswith("_ts2"):
-                    continue
-                elif name in idents:
-                    to_drop.add(name)
-                    if "%s_ts1" % name in all_names:
-                        to_drop.add("%s_ts1" % name)
-                    if "%s_ts2" % name in all_names:
-                        to_drop.add("%s_ts2" % name)
-
-            dropped = total = 0
-            for total, username in enumerate(to_drop, 1):
-                if _ora_drop_ignore(conn, username):
-                    dropped += 1
-            log.info(
-                "Dropped %d out of %d stale databases detected",
-                dropped, total)
+            url_obj = sa_url.make_url(db_url)
+            url_key = (url_obj.get_backend_name(), url_obj.host)
+            urls[url_key].add(db_url)
+            idents[url_key].add(db_name)
+
+    for url_key in urls:
+        backend = url_key[0]
+        url = list(urls[url_key])[0]
+        ident = idents[url_key]
+        if backend == "oracle":
+            _reap_oracle_dbs(url, ident)
+        elif backend == "mssql":
+            _reap_mssql_dbs(url, ident)
+
+def _reap_oracle_dbs(url, idents):
+    log.info("db reaper connecting to %r", url)
+    eng = create_engine(url)
+    with eng.connect() as conn:
+
+        log.info("identifiers in file: %s", ", ".join(idents))
+
+        to_reap = conn.execute(
+            "select u.username from all_users u where username "
+            "like 'TEST_%' and not exists (select username "
+            "from v$session where username=u.username)")
+        all_names = {username.lower() for (username, ) in to_reap}
+        to_drop = set()
+        for name in all_names:
+            if name.endswith("_ts1") or name.endswith("_ts2"):
+                continue
+            elif name in idents:
+                to_drop.add(name)
+                if "%s_ts1" % name in all_names:
+                    to_drop.add("%s_ts1" % name)
+                if "%s_ts2" % name in all_names:
+                    to_drop.add("%s_ts2" % name)
+
+        dropped = total = 0
+        for total, username in enumerate(to_drop, 1):
+            if _ora_drop_ignore(conn, username):
+                dropped += 1
+        log.info(
+            "Dropped %d out of %d stale databases detected",
+            dropped, total)
+
 
 
 @_follower_url_from_main.for_db("oracle")
@@ -340,3 +352,65 @@ def _oracle_follower_url_from_main(url, ident):
     return url
 
 
+@_create_db.for_db("mssql")
+def _mssql_create_db(cfg, eng, ident):
+    with eng.connect().execution_options(
+            isolation_level="AUTOCOMMIT") as conn:
+        conn.execute("create database %s" % ident)
+        conn.execute(
+            "ALTER DATABASE %s SET ALLOW_SNAPSHOT_ISOLATION ON" % ident)
+        conn.execute(
+            "ALTER DATABASE %s SET READ_COMMITTED_SNAPSHOT ON" % ident)
+        conn.execute("use %s" % ident)
+        conn.execute("create schema test_schema")
+
+
+@_drop_db.for_db("mssql")
+def _mssql_drop_db(cfg, eng, ident):
+    with eng.connect().execution_options(
+            isolation_level="AUTOCOMMIT") as conn:
+        _mssql_drop_ignore(conn, ident)
+
+def _mssql_drop_ignore(conn, ident):
+    try:
+        # typically when this happens, we can't KILL the session anyway,
+        # so let the cleanup process drop the DBs
+        # for row in conn.execute("select session_id from sys.dm_exec_sessions "
+        #        "where database_id=db_id('%s')" % ident):
+        #    log.info("killing SQL server session %s", row['session_id'])
+        #    conn.execute("kill %s" % row['session_id'])
+
+        conn.execute("drop database %s" % ident)
+        log.info("Reaped db: %s", ident)
+        return True
+    except exc.DatabaseError as err:
+        log.warning("couldn't drop db: %s", err)
+        return False
+
+
+def _reap_mssql_dbs(url, idents):
+    log.info("db reaper connecting to %r", url)
+    eng = create_engine(url)
+    with eng.connect().execution_options(
+            isolation_level="AUTOCOMMIT") as conn:
+
+        log.info("identifiers in file: %s", ", ".join(idents))
+
+        to_reap = conn.execute(
+            "select d.name from sys.databases as d where name "
+            "like 'TEST_%' and not exists (select session_id "
+            "from sys.dm_exec_sessions "
+            "where database_id=d.database_id)")
+        all_names = {dbname.lower() for (dbname, ) in to_reap}
+        to_drop = set()
+        for name in all_names:
+            if name in idents:
+                to_drop.add(name)
+
+        dropped = total = 0
+        for total, dbname in enumerate(to_drop, 1):
+            if _mssql_drop_ignore(conn, dbname):
+                dropped += 1
+        log.info(
+            "Dropped %d out of %d stale databases detected",
+            dropped, total)
index 327362bf6c64d55545f3ec3d3a40a9ff505af498..b3ad29a3b13faa03763e0f5942e2b209e43207ed 100644 (file)
@@ -173,6 +173,19 @@ class SuiteRequirements(Requirements):
         """Target database must support window functions."""
         return exclusions.closed()
 
+    @property
+    def ctes(self):
+        """Target database supports CTEs"""
+
+        return exclusions.closed()
+
+    @property
+    def ctes_on_dml(self):
+        """target database supports CTES which consist of INSERT, UPDATE
+        or DELETE"""
+
+        return exclusions.closed()
+
     @property
     def autoincrement_insert(self):
         """target platform generates new surrogate integer primary key values
@@ -579,6 +592,22 @@ class SuiteRequirements(Requirements):
         """
         return exclusions.closed()
 
+    @property
+    def nested_aggregates(self):
+        """target database can select an aggregate from a subquery that's
+        also using an aggregate
+
+        """
+        return exclusions.open()
+
+    @property
+    def recursive_fk_cascade(self):
+        """target database must support ON DELETE CASCADE on a self-referential
+        foreign key
+
+        """
+        return exclusions.open()
+
     @property
     def precision_numerics_retains_significant_digits(self):
         """A precision numeric type will return empty significant digits,
index 4086a4c24d8e0b68f2db342e988cec7e5007620b..22ae1d8c5758b570003119a2d560ceb2f498fca0 100644 (file)
@@ -242,6 +242,7 @@ class CompoundSelectTest(fixtures.TablesTest):
             [(2, 2, 3), (3, 3, 4)]
         )
 
+    @testing.requires.order_by_col_from_union
     @testing.requires.parens_in_union_contained_select_w_limit_offset
     def test_limit_offset_selectable_in_unions(self):
         table = self.tables.some_table
similarity index 59%
rename from reap_oracle_dbs.py
rename to reap_dbs.py
index 29d227464360f785b395a44e8f0dedeb0daba74b..10accde90f9e288993c7565b710e5cae9e157331 100644 (file)
@@ -1,10 +1,14 @@
-"""Drop Oracle databases that are left over from a
+"""Drop Oracle, SQL Server databases that are left over from a
 multiprocessing test run.
 
 Currently the cx_Oracle driver seems to sometimes not release a
 TCP connection even if close() is called, which prevents the provisioning
 system from dropping a database in-process.
 
+For SQL Server, databases still remain in use after tests run and
+running a kill of all detected sessions does not seem to release the
+database in process.
+
 """
 from sqlalchemy.testing import provision
 import logging
@@ -13,6 +17,6 @@ import sys
 logging.basicConfig()
 logging.getLogger(provision.__name__).setLevel(logging.INFO)
 
-provision.reap_oracle_dbs(sys.argv[1])
+provision.reap_dbs(sys.argv[1])
 
 
index 3181cfe6147633d9caeb4fa240c9492bc1a0edee..381e82d3c9fc2dcb79c0bb9f5871d3a39c8f1958 100644 (file)
@@ -246,7 +246,7 @@ class MemUsageTest(EnsureZeroed):
 class MemUsageWBackendTest(EnsureZeroed):
 
     __tags__ = 'memory_intensive',
-    __requires__ = 'cpython',
+    __requires__ = 'cpython', 'memory_process_intensive'
     __backend__ = True
 
     # ensure a pure growing test trips the assertion
index 1164270e9adbcf71bdd7313edcc4d8dd6dcff637..ef2a1426afd47cc68ca61c3d124a8bfb4c470b16 100644 (file)
@@ -6,7 +6,7 @@ from sqlalchemy.testing import fixtures, AssertsCompiledSQL, assertions
 from sqlalchemy import testing
 from sqlalchemy.util import ue
 from sqlalchemy import util
-from sqlalchemy.testing.assertsql import CursorSQL
+from sqlalchemy.testing.assertsql import CursorSQL, DialectSQL
 from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
     func, PrimaryKeyConstraint, desc, Sequence, DDL, ForeignKey, or_, and_
 from sqlalchemy import event
@@ -190,6 +190,8 @@ class QueryUnicodeTest(fixtures.TestBase):
     __only_on__ = 'mssql'
     __backend__ = True
 
+    @testing.requires.mssql_freetds
+    @testing.requires.python2
     def test_convert_unicode(self):
         meta = MetaData(testing.db)
         t1 = Table(
@@ -284,7 +286,7 @@ class QueryTest(testing.AssertsExecutionResults, fixtures.TestBase):
             meta.drop_all()
 
     @testing.provide_metadata
-    def test_disable_scope_identity(self):
+    def _test_disable_scope_identity(self):
         engine = engines.testing_engine(options={"use_scope_identity": False})
         metadata = self.metadata
         t1 = Table(
@@ -298,10 +300,11 @@ class QueryTest(testing.AssertsExecutionResults, fixtures.TestBase):
         with self.sql_execution_asserter(engine) as asserter:
             engine.execute(t1.insert(), {"data": "somedata"})
 
+        # TODO: need a dialect SQL that acts like Cursor SQL
         asserter.assert_(
-            CursorSQL(
-                "INSERT INTO t1 (data) VALUES (?)",
-                ("somedata", )
+            DialectSQL(
+                "INSERT INTO t1 (data) VALUES (:data)",
+                {"data": "somedata"}
             ),
             CursorSQL("SELECT @@identity AS lastrowid"),
         )
index 2f705d8a3602e9cb22716bc20ad95f2cefce7494..d3b270d2f572d73b8358e2df83f58f56cb04616e 100644 (file)
@@ -130,26 +130,28 @@ class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
 
         dbname = testing.db.scalar("select db_name()")
         owner = testing.db.scalar("SELECT user_name()")
+        referred_schema = '%(dbname)s.%(owner)s' % {
+            "dbname": dbname, "owner": owner}
 
         inspector = inspect(testing.db)
         bar_via_db = inspector.get_foreign_keys(
-            "bar", schema="%s.%s" % (dbname, owner))
+            "bar", schema=referred_schema)
         eq_(
             bar_via_db,
             [{
                 'referred_table': 'foo',
                 'referred_columns': ['id'],
-                'referred_schema': 'test.dbo',
+                'referred_schema': referred_schema,
                 'name': 'fkfoo',
                 'constrained_columns': ['foo_id']}]
         )
 
-        assert testing.db.has_table("bar", schema="test.dbo")
+        assert testing.db.has_table("bar", schema=referred_schema)
 
         m2 = MetaData()
-        Table('bar', m2, schema="test.dbo", autoload=True,
-                                autoload_with=testing.db)
-        eq_(m2.tables["test.dbo.foo"].schema, "test.dbo")
+        Table('bar', m2, schema=referred_schema, autoload=True,
+                               autoload_with=testing.db)
+        eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
 
     @testing.provide_metadata
     def test_indexes_cols(self):
index 841624303db5ce532039529745dc0792c36bf3ec..f0402e8fbcdb938d7a073724e00ef6f16a52591d 100644 (file)
@@ -8,6 +8,7 @@ from sqlalchemy import Table, Column, MetaData, Float, \
     Date, Time, DateTime, DefaultClause, PickleType, text, Text, \
     UnicodeText, LargeBinary
 from sqlalchemy import types, schema
+from sqlalchemy import util
 from sqlalchemy.databases import mssql
 from sqlalchemy.dialects.mssql.base import TIME, _MSDate
 from sqlalchemy.dialects.mssql.base import MS_2005_VERSION, MS_2008_VERSION
@@ -46,6 +47,8 @@ class TimeTypeTest(fixtures.TestBase):
 
 
 class MSDateTypeTest(fixtures.TestBase):
+    __only_on__ = 'mssql'
+    __backend__ = True
 
     def test_result_processor(self):
         expected = datetime.date(2000, 1, 2)
@@ -435,6 +438,8 @@ class TypeRoundTripTest(
         fixtures.TestBase, AssertsExecutionResults, ComparesTables):
     __only_on__ = 'mssql'
 
+    __backend__ = True
+
     @classmethod
     def setup_class(cls):
         global metadata
@@ -443,9 +448,6 @@ class TypeRoundTripTest(
     def teardown(self):
         metadata.drop_all()
 
-    @testing.fails_on_everything_except(
-        'mssql+pyodbc',
-        'mssql+mxodbc')
     def test_decimal_notation(self):
         numeric_table = Table(
             'numeric_table', metadata,
@@ -812,22 +814,6 @@ class TypeRoundTripTest(
                 engine.execute(tbl.delete())
 
 
-class MonkeyPatchedBinaryTest(fixtures.TestBase):
-    __only_on__ = 'mssql+pymssql'
-
-    def test_unicode(self):
-        module = __import__('pymssql')
-        result = module.Binary('foo')
-        eq_(result, 'foo')
-
-    def test_bytes(self):
-        module = __import__('pymssql')
-        input = b('\x80\x03]q\x00X\x03\x00\x00\x00oneq\x01a.')
-        expected_result = input
-        result = module.Binary(input)
-        eq_(result, expected_result)
-
-
 binary_table = None
 MyPickleType = None
 
@@ -837,6 +823,8 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
     """Test the Binary and VarBinary types"""
 
     __only_on__ = 'mssql'
+    __requires__ = "non_broken_binary",
+    __backend__ = True
 
     @classmethod
     def setup_class(cls):
@@ -874,6 +862,16 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
         binary_table.create(engine)
         return binary_table
 
+    def test_character_binary(self):
+        engine = testing.db
+        binary_table = self._fixture(engine)
+        with engine.connect() as conn:
+            conn.execute(
+                binary_table.insert(),
+                primary_id=1,
+                data=b("some normal data")
+            )
+
     def test_binary_legacy_types(self):
         self._test_binary(False)
 
@@ -980,7 +978,10 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
                 # the type we used here is 100 bytes
                 # so we will get 100 bytes zero-padded
                 paddedstream = list(stream2[0:99])
-                paddedstream.extend(['\x00'] * (100 - len(paddedstream)))
+                if util.py3k:
+                    paddedstream.extend([0] * (100 - len(paddedstream)))
+                else:
+                    paddedstream.extend(['\x00'] * (100 - len(paddedstream)))
                 eq_(
                     list(row['data_slice']), paddedstream
                 )
index 8437aca37037ac090599c601487d2ce68d556e67..5263c79db08add9d109b3d1cdd1a2be83cff10c0 100644 (file)
@@ -179,7 +179,8 @@ class ExecuteTest(fixtures.TestBase):
     @testing.fails_on_everything_except(
         'postgresql+psycopg2', 'postgresql+psycopg2cffi',
         'postgresql+pypostgresql', 'postgresql+pygresql',
-        'mysql+mysqlconnector', 'mysql+pymysql', 'mysql+cymysql')
+        'mysql+mysqlconnector', 'mysql+pymysql', 'mysql+cymysql',
+        'mssql+pymssql')
     def test_raw_python(self):
         def go(conn):
             conn.execute(
index 51085763f25ab71f8d4e317f1840d66e2a93ab96..a6b5861ed83a991497393dff59ee919b3926287e 100644 (file)
@@ -1542,10 +1542,10 @@ class Ticket2419Test(fixtures.DeclarativeMappedTest):
                         test_needs_autoincrement=True)
             b_id = Column(Integer, ForeignKey('b.id'))
 
-    @testing.fails_on("oracle",
-                      "seems like oracle's query engine can't "
-                      "handle this, not clear if there's an "
-                      "expression-level bug on our end though")
+    @testing.fails_on(["oracle", "mssql"],
+                      "Oracle / SQL server engines can't handle this, "
+                      "not clear if there's an expression-level bug on our "
+                      "end though")
     def test_join_w_eager_w_any(self):
         A, B, C, D, E = (self.classes.A,
                          self.classes.B,
index 007061d60cf0c57078ab145c68812738bba2c8a2..7fd9329f9d640201c8a6606cfb0478fb313da3e9 100644 (file)
@@ -517,7 +517,7 @@ class SortOnlyOnImportantFKsTest(fixtures.MappedTest):
               Column('id', Integer, primary_key=True,
                      test_needs_autoincrement=True),
               Column('b_id', Integer,
-                     ForeignKey('b.id', use_alter=True, name='b')))
+                     ForeignKey('b.id', use_alter=True, name='b_fk')))
         Table('b', metadata,
               Column('id', Integer, ForeignKey('a.id'), primary_key=True))
 
index 213856cf62d9733defa54a0d86912976c76621d4..e5234d254276e54411d7bee75ca2e6fec27b477c 100644 (file)
@@ -94,7 +94,8 @@ class _PolymorphicTestBase(object):
             select([func.count('*')]).select_from(
                 sess.query(Person).with_polymorphic('*')
                 .options(joinedload(Engineer.machines))
-                .limit(2).offset(1).with_labels().subquery()
+                .order_by(Person.person_id).limit(2).offset(1)
+                .with_labels().subquery()
             ).scalar(), 2)
 
     def test_get_one(self):
index 210d6ac39b5b5c6f589cd6c55da7e5b1d13a7b3c..affa14c0e9e03ac0f0a4fb11f4e0e314c97c70c7 100644 (file)
@@ -16,7 +16,6 @@ from sqlalchemy.orm import mapper, relationship, backref, create_session
 from sqlalchemy.testing import eq_
 from sqlalchemy.testing import fixtures
 
-
 class EagerTest(fixtures.MappedTest):
     run_deletes = None
     run_inserts = "once"
@@ -25,13 +24,6 @@ class EagerTest(fixtures.MappedTest):
     @classmethod
     def define_tables(cls, metadata):
 
-        if testing.db.dialect.supports_native_boolean:
-            false = 'false'
-        else:
-            false = "0"
-
-        cls.other['false'] = false
-
         Table('owners', metadata,
               Column('id', Integer, primary_key=True,
                      test_needs_autoincrement=True),
@@ -55,7 +47,7 @@ class EagerTest(fixtures.MappedTest):
                      primary_key=True),
               Column('owner_id', Integer, ForeignKey('owners.id'),
                      primary_key=True),
-              Column('someoption', sa.Boolean, server_default=false,
+              Column('someoption', sa.Boolean, server_default=sa.false(),
                      nullable=False))
 
     @classmethod
@@ -216,17 +208,16 @@ class EagerTest(fixtures.MappedTest):
 
     @testing.crashes('sybase', 'FIXME: unknown, verify not fails_on')
     def test_without_outerjoin_literal(self):
-        Thing, tests, false = (self.classes.Thing,
-                               self.tables.tests,
-                               self.other.false)
+        Thing, tests = (self.classes.Thing,
+                        self.tables.tests)
 
         s = create_session()
         q = s.query(Thing).options(sa.orm.joinedload('category'))
         result = (q.filter(
                 (tests.c.owner_id == 1) &
                 text(
-                    'options.someoption is null or options.someoption=%s' %
-                    false)).join('owner_option'))
+                    'options.someoption is null or options.someoption=:opt'
+                ).bindparams(opt=False)).join('owner_option'))
 
         result_str = ["%d %s" % (t.id, t.category.name) for t in result]
         eq_(result_str, ['3 Some Category'])
index cef0ad8d3880ddef24c709e4529eda3a475fd69f..694e85819aaa6620df3d8edea16b8fb7c19c3bef 100644 (file)
@@ -430,7 +430,6 @@ class PrimaryKeyTest(fixtures.MappedTest):
         g2 = sess.query(Graph).get(Version(g.id, g.version_id))
         eq_(g.version, g2.version)
 
-    @testing.fails_on('mssql', 'Cannot update identity columns.')
     def test_pk_mutation(self):
         Graph, Version = self.classes.Graph, self.classes.Version
 
index 9bfa8e8eb75550d3c382bf0fae4b238de7f2e1c4..45656f3fcb3eaa277ca3161c777ee13d3713e2a3 100644 (file)
@@ -755,7 +755,7 @@ class InstancesTest(QueryTest, AssertsCompiledSQL):
             order_by(User.id, adalias.c.id)
 
         def go():
-            eq_(self.static.user_address_result, q.order_by(User.id).all())
+            eq_(self.static.user_address_result, q.all())
         self.assert_sql_count(testing.db, go, 1)
         sess.expunge_all()
 
index 2ea7f4e0f9ce13a0daea13049fd3066e194bfeef..2cb389f11cb08170bf803458189bba7bfe5f9af4 100644 (file)
@@ -102,6 +102,7 @@ class ParentRemovalTest(fixtures.MappedTest):
 
         self._assert_not_hasparent(a1)
 
+    @testing.requires.updateable_autoincrement_pks
     @testing.requires.predictable_gc
     def test_stale_state_positive_pk_change(self):
         """Illustrate that we can't easily link a
index 8fc73129e0ea29873929f1e24f94831fc368be06..f20c3db71592d8aed0f0591b03d5f85e72c5f3c6 100644 (file)
@@ -250,6 +250,7 @@ class M2MTest(fixtures.MappedTest):
                                    (Transition, [{'name': 'transition1'},
                                                  {'name': 'transition2'}])})
 
+    @testing.requires.updateable_autoincrement_pks
     @testing.requires.sane_multi_rowcount
     def test_stale_conditions(self):
         Place, Transition, place_input, place, transition = (
index 082b62300234a648810efae00cf5009042202672..19adf8983ff5703128feceb045d0783939876f3d 100644 (file)
@@ -2082,27 +2082,30 @@ class SliceTest(QueryTest):
         User = self.classes.User
 
         sess = create_session()
-        q = sess.query(User)
+        q = sess.query(User).order_by(User.id)
 
         self.assert_sql(
             testing.db, lambda: q[10:20], [
                 (
                     "SELECT users.id AS users_id, users.name "
-                    "AS users_name FROM users LIMIT :param_1 OFFSET :param_2",
+                    "AS users_name FROM users ORDER BY users.id "
+                    "LIMIT :param_1 OFFSET :param_2",
                     {'param_1': 10, 'param_2': 10})])
 
         self.assert_sql(
             testing.db, lambda: q[:20], [
                 (
                     "SELECT users.id AS users_id, users.name "
-                    "AS users_name FROM users LIMIT :param_1",
+                    "AS users_name FROM users ORDER BY users.id "
+                    "LIMIT :param_1",
                     {'param_1': 20})])
 
         self.assert_sql(
             testing.db, lambda: q[5:], [
                 (
                     "SELECT users.id AS users_id, users.name "
-                    "AS users_name FROM users LIMIT -1 OFFSET :param_1",
+                    "AS users_name FROM users ORDER BY users.id "
+                    "LIMIT -1 OFFSET :param_1",
                     {'param_1': 5})])
 
         self.assert_sql(testing.db, lambda: q[2:2], [])
@@ -2113,19 +2116,19 @@ class SliceTest(QueryTest):
             testing.db, lambda: q[-5:-2], [
                 (
                     "SELECT users.id AS users_id, users.name AS users_name "
-                    "FROM users", {})])
+                    "FROM users ORDER BY users.id", {})])
 
         self.assert_sql(
             testing.db, lambda: q[-5:], [
                 (
                     "SELECT users.id AS users_id, users.name AS users_name "
-                    "FROM users", {})])
+                    "FROM users ORDER BY users.id", {})])
 
         self.assert_sql(
             testing.db, lambda: q[:], [
                 (
                     "SELECT users.id AS users_id, users.name AS users_name "
-                    "FROM users", {})])
+                    "FROM users ORDER BY users.id", {})])
 
 
 class FilterTest(QueryTest, AssertsCompiledSQL):
@@ -4456,6 +4459,8 @@ class SessionBindTest(QueryTest):
         with self._assert_bind_args(session):
             session.query(func.max(User.score)).scalar()
 
+
+    @testing.requires.nested_aggregates
     def test_column_property_select(self):
         User = self.classes.User
         Address = self.classes.Address
index 619e99abd97b96601ead6685e3c73283a22b7311..1510689f9f2e05e7ae7758bc3f459523374fbf58 100644 (file)
@@ -1,6 +1,7 @@
 from __future__ import with_statement
 from sqlalchemy import (
     testing, exc as sa_exc, event, String, Column, Table, select, func)
+from sqlalchemy.sql import elements
 from sqlalchemy.testing import (
     fixtures, engines, eq_, assert_raises, assert_raises_message,
     assert_warnings, mock, expect_warnings, is_, is_not_)
@@ -11,7 +12,6 @@ from sqlalchemy.testing.util import gc_collect
 from test.orm._fixtures import FixtureTest
 from sqlalchemy import inspect
 
-
 class SessionTransactionTest(fixtures.RemovesEvents, FixtureTest):
     run_inserts = None
     __backend__ = True
@@ -491,7 +491,9 @@ class SessionTransactionTest(fixtures.RemovesEvents, FixtureTest):
 
         def prevent_savepoint_rollback(
                 cursor, statement, parameters, context=None):
-            if "rollback to savepoint" in statement.lower():
+            if context is not None and context.compiled and isinstance(
+                    context.compiled.statement,
+                    elements.RollbackToSavepointClause):
                 raise rollback_error
 
         self.event_listen(
@@ -551,7 +553,9 @@ class SessionTransactionTest(fixtures.RemovesEvents, FixtureTest):
 
         def prevent_savepoint_rollback(
                 cursor, statement, parameters, context=None):
-            if "rollback to savepoint" in statement.lower():
+            if context is not None and context.compiled and isinstance(
+                    context.compiled.statement,
+                    elements.RollbackToSavepointClause):
                 raise rollback_error
 
         self.event_listen(testing.db, "handle_error", canary, retval=True)
index 09f64c1084d5175b044038d22067d0e06b45dcc7..90616ae12b5781f53af6b4070ef2e9e4fbde72ae 100644 (file)
@@ -158,9 +158,6 @@ class UnicodeSchemaTest(fixtures.MappedTest):
     def teardown_class(cls):
         super(UnicodeSchemaTest, cls).teardown_class()
 
-    @testing.fails_on(
-        'mssql+pyodbc',
-        'pyodbc returns a non unicode encoding of the results description.')
     def test_mapping(self):
         t2, t1 = self.tables.t2, self.tables.t1
 
@@ -199,9 +196,6 @@ class UnicodeSchemaTest(fixtures.MappedTest):
         assert new_a1.t2s[0].d == b1.d
         session.expunge_all()
 
-    @testing.fails_on(
-        'mssql+pyodbc',
-        'pyodbc returns a non unicode encoding of the results description.')
     def test_inheritance_mapping(self):
         t2, t1 = self.tables.t2, self.tables.t1
 
@@ -241,10 +235,12 @@ class BinaryHistTest(fixtures.MappedTest, testing.AssertsExecutionResults):
         class Foo(cls.Basic):
             pass
 
+    @testing.requires.non_broken_binary
     def test_binary_equality(self):
         Foo, t1 = self.classes.Foo, self.tables.t1
 
-        data = b("this is some data")
+        # short binary value chosen to exercise driver binary handling;
+        data = b'm\x18'  # see https://github.com/pymssql/pymssql/issues/504
 
         mapper(Foo, t1)
 
@@ -639,7 +635,7 @@ class PassiveDeletesTest(fixtures.MappedTest):
 
 
 class BatchDeleteIgnoresRowcountTest(fixtures.DeclarativeMappedTest):
-    __requires__ = ('foreign_keys',)
+    __requires__ = ('foreign_keys', 'recursive_fk_cascade')
 
     @classmethod
     def setup_classes(cls):
index 270c3708e5aad95edfe58af39844646a913f4ed1..9c1a26e4bb8733a5108692b7b3c24824acf6f423 100644 (file)
@@ -1573,7 +1573,7 @@ class BasicStaleChecksTest(fixtures.MappedTest):
                     sess.flush
                 )
 
-    @testing.requires.sane_multi_rowcount
+    @testing.requires.sane_rowcount
     def test_delete_twice(self):
         Parent, Child = self._fixture()
         sess = Session()
index ed5f78465f23a9fa2dbf4f3ed04231c6bf9c072b..089541848b9d1d1564b1edc339eda93877f726b1 100644 (file)
@@ -786,6 +786,7 @@ class NoBumpOnRelationshipTest(fixtures.MappedTest):
 
 class ColumnTypeTest(fixtures.MappedTest):
     __backend__ = True
+    __requires__ = 'sane_rowcount',
 
     @classmethod
     def define_tables(cls, metadata):
@@ -900,6 +901,7 @@ class RowSwitchTest(fixtures.MappedTest):
 
 class AlternateGeneratorTest(fixtures.MappedTest):
     __backend__ = True
+    __requires__ = 'sane_rowcount',
 
     @classmethod
     def define_tables(cls, metadata):
@@ -1581,6 +1583,7 @@ class ManualVersionTest(fixtures.MappedTest):
 class ManualInheritanceVersionTest(fixtures.MappedTest):
     run_define_tables = 'each'
     __backend__ = True
+    __requires__ = 'sane_rowcount',
 
     @classmethod
     def define_tables(cls, metadata):
index 0362e28d139d2abd96aa5fc904533844c6389c19..9b01a22dd4cd7bdee576a3ec99f676c75a5c762a 100644 (file)
@@ -54,7 +54,7 @@ class DefaultRequirements(SuiteRequirements):
         def mysql_not_mariadb_102(config):
             return against(config, "mysql") and (
                 not config.db.dialect._is_mariadb or
-                config.db.dialect.server_version_info < (5, 5, 5, 10, 2)
+                config.db.dialect._mariadb_normalized_version_info < (10, 2)
             )
 
         return self.check_constraints + fails_on(
@@ -102,6 +102,13 @@ class DefaultRequirements(SuiteRequirements):
         return fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + \
             skip_if('mssql')
 
+    @property
+    def recursive_fk_cascade(self):
+        """target database must support ON DELETE CASCADE on a self-referential
+        foreign key"""
+
+        return skip_if(["mssql"])
+
     @property
     def deferrable_fks(self):
         """target database must support deferrable fks"""
@@ -191,6 +198,13 @@ class DefaultRequirements(SuiteRequirements):
                     ["firebird"], "not supported"
                 )
 
+    @property
+    def non_broken_binary(self):
+        """target DBAPI must work fully with binary values"""
+
+        # see https://github.com/pymssql/pymssql/issues/504
+        return skip_if(["mssql+pymssql"])
+
     @property
     def binary_comparisons(self):
         """target database/driver can allow BLOB/BINARY fields to be compared
@@ -227,10 +241,8 @@ class DefaultRequirements(SuiteRequirements):
 
         return skip_if(
             [
-                "mssql+pyodbc",
-                "mssql+mxodbc",
-                "mysql+mysqldb",
-                "mysql+pymysql"], "no driver support"
+                "mssql",
+                "mysql"], "no driver support"
         )
 
     @property
@@ -249,6 +261,17 @@ class DefaultRequirements(SuiteRequirements):
                     "SQL Server 2005+ is required for "
                     "independent connections")])
 
+    @property
+    def memory_process_intensive(self):
+        """Driver is able to handle the memory tests which run in a subprocess
+        and iterate through hundreds of connections
+
+        """
+        return skip_if([
+            no_support("oracle", "Oracle XE usually can't handle these"),
+            no_support("mssql+pyodbc", "MS ODBC drivers struggle")
+        ])
+
     @property
     def updateable_autoincrement_pks(self):
         """Target must support UPDATE on autoincrement/integer primary key."""
@@ -330,7 +353,10 @@ class DefaultRequirements(SuiteRequirements):
     @property
     def savepoints_w_release(self):
         return self.savepoints + skip_if(
-            "oracle", "oracle doesn't support release of savepoint")
+            ["oracle", "mssql"],
+            "database doesn't support release of savepoint"
+        )
+
 
     @property
     def schemas(self):
@@ -402,6 +428,15 @@ class DefaultRequirements(SuiteRequirements):
             ['postgresql', 'mssql']
         )
 
+    @property
+    def ctes_on_dml(self):
+        """target database supports CTES which consist of INSERT, UPDATE
+        or DELETE"""
+
+        return only_if(
+            ['postgresql']
+        )
+
     @property
     def mod_operator_as_percent_sign(self):
         """target database must use a plain percent '%' as the 'modulus'
@@ -426,12 +461,24 @@ class DefaultRequirements(SuiteRequirements):
                 "firebird", "mysql", "sybase",
             ], 'no support for EXCEPT')
 
+    @property
+    def order_by_col_from_union(self):
+        """target database supports ordering by a column from a SELECT
+        inside of a UNION
+
+        E.g.  (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
+
+        Fails on SQL Server
+
+        """
+        return fails_if('mssql')
+
     @property
     def parens_in_union_contained_select_w_limit_offset(self):
         """Target database must support parenthesized SELECT in UNION
         when LIMIT/OFFSET is specifically present.
 
-        E.g. (SELECT ...) UNION (SELECT ..)
+        E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
 
         This is known to fail on SQLite.
 
@@ -443,7 +490,7 @@ class DefaultRequirements(SuiteRequirements):
         """Target database must support parenthesized SELECT in UNION
         when OFFSET/LIMIT is specifically not present.
 
-        E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
+        E.g. (SELECT ...) UNION (SELECT ..)
 
         This is known to fail on SQLite.  It also fails on Oracle
         because without LIMIT/OFFSET, there is currently no step that
@@ -549,12 +596,6 @@ class DefaultRequirements(SuiteRequirements):
                 util.py2k,
                 "bug in mysqlconnector 2.0"
             ),
-            LambdaPredicate(
-                lambda config: against(config, 'mssql+pyodbc') and
-                config.db.dialect.freetds and
-                config.db.dialect.freetds_driver_version < "0.91",
-                "older freetds doesn't support unicode DDL"
-            ),
             exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'),
         ])
 
@@ -598,6 +639,13 @@ class DefaultRequirements(SuiteRequirements):
         return fails_on_everything_except('postgresql', 'oracle', 'mssql',
                                           'sybase', 'sqlite')
 
+    @property
+    def nested_aggregates(self):
+        """target database can select an aggregate from a subquery that's
+        also using an aggregate"""
+
+        return skip_if(["mssql"])
+
     @property
     def array_type(self):
         return only_on([
@@ -722,8 +770,7 @@ class DefaultRequirements(SuiteRequirements):
              ('sqlite', None, None, 'TODO'),
              ("firebird", None, None, "Precision must be from 1 to 18"),
              ("sybase+pysybase", None, None, "TODO"),
-             ('mssql+pymssql', None, None,
-              'FIXME: improve pymssql dec handling')]
+            ]
         )
 
     @property
@@ -892,15 +939,7 @@ class DefaultRequirements(SuiteRequirements):
 
     @property
     def mssql_freetds(self):
-        return only_on(
-            LambdaPredicate(
-                lambda config: (
-                    (against(config, 'mssql+pyodbc') and
-                     config.db.dialect.freetds)
-                    or against(config, 'mssql+pymssql')
-                )
-            )
-        )
+        return only_on(["mssql+pymssql"])
 
     @property
     def ad_hoc_engines(self):
index 3c4ccc0502b05792dca30c7fedfff9c91cfe9ff6..1ef49bf047408d2a0c082b2c8f3f2dca9bad91c0 100644 (file)
@@ -634,12 +634,15 @@ class CTEDefaultTest(fixtures.TablesTest):
                 expected
             )
 
+    @testing.requires.ctes_on_dml
     def test_update_in_select(self):
         self._test_a_in_b("update", "select")
 
+    @testing.requires.ctes_on_dml
     def test_delete_in_select(self):
         self._test_a_in_b("update", "select")
 
+    @testing.requires.ctes_on_dml
     def test_insert_in_select(self):
         self._test_a_in_b("update", "select")
 
index 6015f4e7479d9e7e25b0bcef73a5a74e18945c6d..502ef6912292ebc0b082249124188005e40261a5 100644 (file)
@@ -158,6 +158,7 @@ class InsertExecTest(fixtures.TablesTest):
             {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
         )
 
+    @testing.requires.sequences
     def test_lastrow_accessor_four(self):
         metadata = MetaData()
         self._test_lastrow_accessor(
index 28300855f863bd1577636feb59a554555e6e1c9f..afb1137488688c8228b72ae4853ba16fd6e92f2b 100644 (file)
@@ -503,9 +503,7 @@ class QueryTest(fixtures.TestBase):
 
     @testing.fails_on('firebird', "uses sql-92 rules")
     @testing.fails_on('sybase', "uses sql-92 rules")
-    @testing.fails_if(
-        lambda: testing.against('mssql+pyodbc') and not
-        testing.db.dialect.freetds, "uses sql-92 rules")
+    @testing.skip_if(['mssql'], "mssql uses sql-92 rules")
     def test_bind_in(self):
         """test calling IN against a bind parameter.
 
index 48fe288613f8c751a1e2b15b77819586d49be508..41092efe9ecbd5f3524a152ad15d6920e46906fc 100644 (file)
@@ -523,6 +523,7 @@ class ResultProxyTest(fixtures.TablesTest):
         eq_(result.fetchone(), None)
         assert connection.closed
 
+    @testing.requires.updateable_autoincrement_pks
     def test_connectionless_autoclose_no_metadata(self):
         result = testing.db.execute("update users set user_id=5")
         connection = result.connection
index fdcf53c27769577558de1fe7367f268c3abf341e..b6cc04322ff8a4b25596882fbc5d1ab904d7e684 100644 (file)
@@ -344,13 +344,18 @@ class UserDefinedTest(fixtures.TablesTest, AssertsCompiledSQL):
             def get_col_spec(self):
                 return "BAR"
 
+        t = Table('t', MetaData(), Column('bar', MyType, nullable=False))
+
         self.assert_compile(
-            ddl.CreateColumn(Column('bar', MyType)),
-            "bar FOOB bar"
+            ddl.CreateColumn(t.c.bar),
+            "bar FOOB bar NOT NULL"
         )
+
+        t = Table('t', MetaData(),
+                  Column('bar', MyOtherType, nullable=False))
         self.assert_compile(
-            ddl.CreateColumn(Column('bar', MyOtherType)),
-            "bar BAR"
+            ddl.CreateColumn(t.c.bar),
+            "bar BAR NOT NULL"
         )
 
     def test_typedecorator_literal_render_fallback_bound(self):
@@ -1165,7 +1170,7 @@ class EnumTest(AssertsCompiledSQL, fixtures.TablesTest):
 
         Table(
             'non_native_enum_table', metadata,
-            Column("id", Integer, primary_key=True),
+            Column("id", Integer, primary_key=True, autoincrement=False),
             Column('someenum', Enum('one', 'two', 'three', native_enum=False)),
             Column('someotherenum',
                    Enum('one', 'two', 'three',
@@ -1369,7 +1374,7 @@ class EnumTest(AssertsCompiledSQL, fixtures.TablesTest):
     @testing.requires.enforces_check_constraints
     def test_check_constraint(self):
         assert_raises(
-            (exc.IntegrityError, exc.ProgrammingError),
+            (exc.IntegrityError, exc.ProgrammingError, exc.OperationalError),
             testing.db.execute,
             "insert into non_native_enum_table "
             "(id, someenum) values(1, 'four')")
@@ -1614,6 +1619,7 @@ class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
     def teardown_class(cls):
         metadata.drop_all()
 
+    @testing.requires.non_broken_binary
     def test_round_trip(self):
         testobj1 = pickleable.Foo('im foo 1')
         testobj2 = pickleable.Foo('im foo 2')
@@ -2399,10 +2405,10 @@ class TestKWArgPassThru(AssertsCompiledSQL, fixtures.TestBase):
                 return "FOOB %s" % kw['type_expression'].name
 
         m = MetaData()
-        t = Table('t', m, Column('bar', MyType))
+        t = Table('t', m, Column('bar', MyType, nullable=False))
         self.assert_compile(
             ddl.CreateColumn(t.c.bar),
-            "bar FOOB bar"
+            "bar FOOB bar NOT NULL"
         )
 
 
diff --git a/tox.ini b/tox.ini
index 42144cafe4fea2e1c6d794cf6452178bd096528c..c1e24ee677beb2cdf896100a3046f2dc95bbe3ec 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -55,8 +55,10 @@ setenv=
     sqlite: SQLITE={env:TOX_SQLITE:--db sqlite}
     postgresql: POSTGRESQL={env:TOX_POSTGRESQL:--db postgresql}
     mysql: MYSQL={env:TOX_MYSQL:--db mysql --db pymysql}
-    oracle: ORACLE={env:TOX_ORACLE:--db oracle} --write-idents oracle_idents.txt --nomemory
-    mssql: MSSQL={env:TOX_MSSQL:--db pyodbc --db pymssql}
+    oracle: ORACLE={env:TOX_ORACLE:--db oracle}
+    mssql: MSSQL={env:TOX_MSSQL:--db mssql --db mssql_pymssql}
+    oracle,mssql: IDENTS=--write-idents db_idents.txt
+    oracle,mssql: NOMEMORY=--nomemory
     backendonly: BACKENDONLY=--backend-only
 
 # tox as of 2.0 blocks all environment variables from the
@@ -66,10 +68,9 @@ passenv=ORACLE_HOME NLS_LANG TOX_POSTGRESQL TOX_MYSQL TOX_ORACLE TOX_MSSQL TOX_S
 
 # for nocext, we rm *.so in lib in case we are doing usedevelop=True
 commands=
-  {nocext}: sh -c "rm -f lib/sqlalchemy/*.so"
-  {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:COVERAGE:} {posargs}
-  {oracle}: python reap_oracle_dbs.py oracle_idents.txt
-
+  nocext: sh -c "rm -f lib/sqlalchemy/*.so"
+  {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:NOMEMORY:} {env:COVERAGE:} {posargs}
+  oracle,mssql: python reap_dbs.py db_idents.txt
 
 [testenv:pep8]
 deps=flake8