git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
audition pymssql once more; retire sane_rowcount_returning
author Mike Bayer <mike_mp@zzzcomputing.com>
Fri, 3 Mar 2023 14:30:58 +0000 (09:30 -0500)
committer Mike Bayer <mike_mp@zzzcomputing.com>
Sun, 5 Mar 2023 04:22:20 +0000 (23:22 -0500)
pymssql seems to be maintained again and appears to be working
completely, so let's try re-enabling it.

Fixed issue in the new :class:`.Uuid` datatype which prevented it from
working with the pymssql driver. As pymssql seems to be maintained again,
restored testing support for pymssql.

Tweaked the pymssql dialect to take better advantage of
RETURNING for INSERT statements in order to retrieve last inserted primary
key values, in the same way as the mssql+pyodbc dialect currently
does.
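
As a rough illustration, an INSERT against an IDENTITY table through the
pymssql driver can now populate ``CursorResult.inserted_primary_key`` from the
OUTPUT INSERTED clause rather than a follow-up ``SCOPE_IDENTITY()`` query; the
connection URL, table and column names in this sketch are hypothetical::

    # minimal sketch, assuming a reachable SQL Server and pymssql installed;
    # the connection URL and table are hypothetical
    from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

    engine = create_engine("mssql+pymssql://scott:tiger@localhost:1433/test")

    metadata = MetaData()
    notes = Table(
        "notes",
        metadata,
        Column("id", Integer, primary_key=True),  # rendered as IDENTITY
        Column("body", String(100)),
    )
    metadata.create_all(engine)

    with engine.begin() as conn:
        # with implicit returning, the INSERT itself carries the new
        # primary key back to the client
        result = conn.execute(notes.insert(), {"body": "hello"})
        print(result.inserted_primary_key)  # e.g. (1,)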

Identified that the ``sqlite`` and ``mssql+pyodbc`` dialects are now
compatible with the SQLAlchemy ORM's "versioned rows" feature, since
SQLAlchemy now computes rowcount for a RETURNING statement in this specific
case by counting the rows returned, rather than relying upon
``cursor.rowcount``.  In particular, the ORM versioned rows use case
(documented at :ref:`mapper_version_counter`) should now be fully
supported with the SQL Server pyodbc dialect.
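
For context, the versioned-rows pattern in question (see
:ref:`mapper_version_counter`) looks roughly like the sketch below; the class
and column names are hypothetical::

    # minimal sketch of a mapper-maintained version counter
    from sqlalchemy import Integer, String
    from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


    class Base(DeclarativeBase):
        pass


    class Widget(Base):
        __tablename__ = "widget"

        id: Mapped[int] = mapped_column(primary_key=True)
        name: Mapped[str] = mapped_column(String(50))
        version_id: Mapped[int] = mapped_column(Integer, nullable=False)

        __mapper_args__ = {"version_id_col": version_id}


    # on flush, the ORM emits
    #   UPDATE widget SET name=?, version_id=? WHERE id=? AND version_id=?
    # and raises StaleDataError when the expected row count (or, with
    # RETURNING, the number of rows that came back) does not match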

Change-Id: I38a0666587212327aecf8f98e86031ab25d1f14d
References: #5321
Fixes: #9414
18 files changed:
doc/build/changelog/unreleased_20/9414.rst [new file with mode: 0644]
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mssql/provision.py
lib/sqlalchemy/dialects/mssql/pymssql.py
lib/sqlalchemy/dialects/mssql/pyodbc.py
lib/sqlalchemy/dialects/sqlite/base.py
lib/sqlalchemy/orm/persistence.py
lib/sqlalchemy/testing/requirements.py
lib/sqlalchemy/testing/suite/test_rowcount.py
lib/sqlalchemy/testing/suite/test_types.py
setup.cfg
test/dialect/mssql/test_query.py
test/dialect/mssql/test_types.py
test/orm/test_versioning.py
test/requirements.py
test/sql/test_defaults.py
test/sql/test_resultset.py
tox.ini

diff --git a/doc/build/changelog/unreleased_20/9414.rst b/doc/build/changelog/unreleased_20/9414.rst
new file mode 100644 (file)
index 0000000..ab11eed
--- /dev/null
@@ -0,0 +1,27 @@
+.. change::
+    :tags: bug, mssql
+    :tickets: 9414
+
+    Fixed issue in the new :class:`.Uuid` datatype which prevented it from
+    working with the pymssql driver. As pymssql seems to be maintained again,
+    restored testing support for pymssql.
+
+.. change::
+    :tags: bug, mssql
+
+    Tweaked the pymssql dialect to take better advantage of
+    RETURNING for INSERT statements in order to retrieve last inserted primary
+    key values, in the same way as the mssql+pyodbc dialect currently
+    does.
+
+.. change::
+    :tags: bug, orm
+
+    Identified that the ``sqlite`` and ``mssql+pyodbc`` dialects are now
+    compatible with the SQLAlchemy ORM's "versioned rows" feature, since
+    SQLAlchemy now computes rowcount for a RETURNING statement in this specific
+    case by counting the rows returned, rather than relying upon
+    ``cursor.rowcount``.  In particular, the ORM versioned rows use case
+    (documented at :ref:`mapper_version_counter`) should now be fully
+    supported with the SQL Server pyodbc dialect.
+
index a30c57c7f641a81d6edb5c3024e5970f4d8722e5..b970f6c0a4ad14b40c0be009c9d0ddd2ff906c21 100644 (file)
@@ -885,29 +885,20 @@ The SQL Server drivers may have limited ability to return the number
 of rows updated from an UPDATE or DELETE statement.
 
 As of this writing, the PyODBC driver is not able to return a rowcount when
-OUTPUT INSERTED is used.  This impacts the SQLAlchemy ORM's versioning feature
-in many cases where server-side value generators are in use in that while the
-versioning operations can succeed, the ORM cannot always check that an UPDATE
-or DELETE statement matched the number of rows expected, which is how it
-verifies that the version identifier matched.   When this condition occurs, a
-warning will be emitted but the operation will proceed.
-
-The use of OUTPUT INSERTED can be disabled by setting the
-:paramref:`_schema.Table.implicit_returning` flag to ``False`` on a particular
-:class:`_schema.Table`, which in declarative looks like::
-
-    class MyTable(Base):
-        __tablename__ = 'mytable'
-        id = Column(Integer, primary_key=True)
-        stuff = Column(String(10))
-        timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
-        __mapper_args__ = {
-            'version_id_col': timestamp,
-            'version_id_generator': False,
-        }
-        __table_args__ = {
-            'implicit_returning': False
-        }
+OUTPUT INSERTED is used.    Previous versions of SQLAlchemy therefore had
+limitations for features such as the "ORM Versioning" feature that relies upon
+accurate rowcounts in order to match version numbers with matched rows.
+
+SQLAlchemy 2.0 now retrieves the "rowcount" manually for these particular use
+cases based on counting the rows that arrived back within RETURNING; so while
+the driver still has this limitation, the ORM Versioning feature is no longer
+impacted by it. As of SQLAlchemy 2.0.5, ORM versioning has been fully
+re-enabled for the pyodbc driver.
+
+.. versionchanged:: 2.0.5  ORM versioning support is restored for the pyodbc
+   driver.  Previously, a warning would be emitted during ORM flush that
+   versioning was not supported.
+
 
 Enabling Snapshot Isolation
 ---------------------------
@@ -2965,6 +2956,7 @@ class MSDialect(default.DefaultDialect):
     supports_statement_cache = True
     supports_default_values = True
     supports_empty_insert = False
+    favor_returning_over_lastrowid = True
 
     supports_comments = True
     supports_default_metavalue = False
index a7ecf4aa3af6177284f60f53ba1098bf234fca76..336e10cd9c2f2411b78030ace575b296f12767bf 100644 (file)
@@ -12,6 +12,7 @@ from ...schema import Table
 from ...testing.provision import create_db
 from ...testing.provision import drop_all_schema_objects_pre_tables
 from ...testing.provision import drop_db
+from ...testing.provision import generate_driver_url
 from ...testing.provision import get_temp_table_name
 from ...testing.provision import log
 from ...testing.provision import normalize_sequence
@@ -19,6 +20,27 @@ from ...testing.provision import run_reap_dbs
 from ...testing.provision import temp_table_keyword_args
 
 
+@generate_driver_url.for_db("mssql")
+def generate_driver_url(url, driver, query_str):
+
+    backend = url.get_backend_name()
+
+    new_url = url.set(drivername="%s+%s" % (backend, driver))
+
+    if driver != "pyodbc":
+        new_url = new_url.set(query="")
+
+    if query_str:
+        new_url = new_url.update_query_string(query_str)
+
+    try:
+        new_url.get_dialect()
+    except exc.NoSuchModuleError:
+        return None
+    else:
+        return new_url
+
+
 @create_db.for_db("mssql")
 def _mssql_create_db(cfg, eng, ident):
     with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
index 699f61714b1c6897cff3d2678174d11c937b7fec..3823db91b3aac65e7b4d305930b5568311f5f191 100644 (file)
 pymssql is a Python module that provides a Python DBAPI interface around
 `FreeTDS <https://www.freetds.org/>`_.
 
-.. note::
+.. versionchanged:: 2.0.5
 
-    pymssql is currently not included in SQLAlchemy's continuous integration
-    (CI) testing.
+    pymssql was restored to SQLAlchemy's continuous integration testing
 
 
 """  # noqa
@@ -51,6 +50,7 @@ class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
 class MSDialect_pymssql(MSDialect):
     supports_statement_cache = True
     supports_native_decimal = True
+    supports_native_uuid = True
     driver = "pymssql"
 
     preparer = MSIdentifierPreparer_pymssql
index 4546cabcd43af3770716cb20e88444786da6a3dc..3fa752565670ac409e88146f69015462f3ac2955 100644 (file)
@@ -281,9 +281,9 @@ non-ODBC drivers such as pymssql where it works very well.
 Rowcount Support
 ----------------
 
-Pyodbc only has partial support for rowcount.  See the notes at
-:ref:`mssql_rowcount_versioning` for important notes when using ORM
-versioning.
+Previous limitations with the SQLAlchemy ORM's "versioned rows" feature with
+Pyodbc have been resolved as of SQLAlchemy 2.0.5. See the notes at
+:ref:`mssql_rowcount_versioning`.
 
 .. _mssql_pyodbc_fastexecutemany:
 
@@ -607,11 +607,10 @@ class MSExecutionContext_pyodbc(MSExecutionContext):
 class MSDialect_pyodbc(PyODBCConnector, MSDialect):
     supports_statement_cache = True
 
-    # mssql still has problems with this on Linux
+    # note this parameter is no longer used by the ORM or default dialect
+    # see #9414
     supports_sane_rowcount_returning = False
 
-    favor_returning_over_lastrowid = True
-
     execution_ctx_cls = MSExecutionContext_pyodbc
 
     colspecs = util.update_copy(
index fcabf5bb82d90729ba8cc21d82ca6fbd1e647716..20065d88e77984a3291390678ab55b548209cde2 100644 (file)
@@ -1910,7 +1910,10 @@ class SQLiteDialect(default.DefaultDialect):
     supports_default_values = True
     supports_default_metavalue = False
 
+    # sqlite issue:
     # https://github.com/python/cpython/issues/93421
+    # note this parameter is no longer used by the ORM or default dialect
+    # see #9414
     supports_sane_rowcount_returning = False
 
     supports_empty_insert = False
index b8368001b2ba678605f12c0942f562e7841d3a94..a331d4ed85cdcf983061b96f35a5411f81877498 100644 (file)
@@ -805,11 +805,7 @@ def _emit_update_statements(
             statement = statement.return_defaults(mapper.version_id_col)
             return_defaults = True
 
-        assert_singlerow = (
-            connection.dialect.supports_sane_rowcount
-            if not return_defaults
-            else connection.dialect.supports_sane_rowcount_returning
-        )
+        assert_singlerow = connection.dialect.supports_sane_rowcount
 
         assert_multirow = (
             assert_singlerow
@@ -1274,9 +1270,6 @@ def _emit_post_update_statements(
 
     if mapper._version_id_has_server_side_value:
         statement = statement.return_defaults(mapper.version_id_col)
-        return_defaults = True
-    else:
-        return_defaults = False
 
     # execute each UPDATE in the order according to the original
     # list of states to guarantee row access order, but
@@ -1291,11 +1284,7 @@ def _emit_post_update_statements(
         records = list(records)
         connection = key[0]
 
-        assert_singlerow = (
-            connection.dialect.supports_sane_rowcount
-            if not return_defaults
-            else connection.dialect.supports_sane_rowcount_returning
-        )
+        assert_singlerow = connection.dialect.supports_sane_rowcount
         assert_multirow = (
             assert_singlerow
             and connection.dialect.supports_sane_multi_rowcount
index 9bfc94e78a583e2b92bd3a91e8a75799722be0de..3332f7ce249aec8135cc4ea907550c590903e205 100644 (file)
@@ -492,6 +492,13 @@ class SuiteRequirements(Requirements):
         """
         return exclusions.open()
 
+    @property
+    def arraysize(self):
+        """dialect includes the required pep-249 attribute
+        ``cursor.arraysize``"""
+
+        return exclusions.open()
+
     @property
     def emulated_lastrowid(self):
         """target dialect retrieves cursor.lastrowid, or fetches
@@ -850,6 +857,24 @@ class SuiteRequirements(Requirements):
 
         return exclusions.closed()
 
+    @property
+    def date_implicit_bound(self):
+        """target dialect when given a date object will bind it such
+        that the database server knows the object is a date, and not
+        a plain string.
+
+        """
+        return exclusions.open()
+
+    @property
+    def time_implicit_bound(self):
+        """target dialect when given a time object will bind it such
+        that the database server knows the object is a time, and not
+        a plain string.
+
+        """
+        return exclusions.open()
+
     @property
     def datetime_implicit_bound(self):
         """target dialect when given a datetime object will bind it such
index 8e19a24a8c3d3ab608b36b9a9d7d2646fa1c0536..ba8e1043772caa50c1a988aab1948307442c0695 100644 (file)
@@ -3,6 +3,7 @@
 from sqlalchemy import bindparam
 from sqlalchemy import Column
 from sqlalchemy import Integer
+from sqlalchemy import MetaData
 from sqlalchemy import select
 from sqlalchemy import String
 from sqlalchemy import Table
@@ -88,23 +89,58 @@ class RowCountTest(fixtures.TablesTest):
         )
         eq_(r.rowcount, 3)
 
-    @testing.requires.update_returning
-    def test_update_rowcount_return_defaults(self, connection):
+    @testing.variation("implicit_returning", [True, False])
+    @testing.variation(
+        "dml",
+        [
+            ("update", testing.requires.update_returning),
+            ("delete", testing.requires.delete_returning),
+        ],
+    )
+    def test_update_delete_rowcount_return_defaults(
+        self, connection, implicit_returning, dml
+    ):
         """note this test should succeed for all RETURNING backends
         as of 2.0.  In
         Idf28379f8705e403a3c6a937f6a798a042ef2540 we changed rowcount to use
         len(rows) when we have implicit returning
 
         """
-        employees_table = self.tables.employees
+
+        if implicit_returning:
+            employees_table = self.tables.employees
+        else:
+            employees_table = Table(
+                "employees",
+                MetaData(),
+                Column(
+                    "employee_id",
+                    Integer,
+                    autoincrement=False,
+                    primary_key=True,
+                ),
+                Column("name", String(50)),
+                Column("department", String(1)),
+                implicit_returning=False,
+            )
 
         department = employees_table.c.department
-        stmt = (
-            employees_table.update()
-            .where(department == "C")
-            .values(name=employees_table.c.department + "Z")
-            .return_defaults()
-        )
+
+        if dml.update:
+            stmt = (
+                employees_table.update()
+                .where(department == "C")
+                .values(name=employees_table.c.department + "Z")
+                .return_defaults()
+            )
+        elif dml.delete:
+            stmt = (
+                employees_table.delete()
+                .where(department == "C")
+                .return_defaults()
+            )
+        else:
+            dml.fail()
 
         r = connection.execute(stmt)
         eq_(r.rowcount, 3)
index d6a74b220c4497f8abc5b30187678a8239d7ffc1..bc288534190dc98a00b263fc750ef0dc21296a31 100644 (file)
@@ -450,11 +450,6 @@ class _DateFixture(_LiteralRoundTripFixture, fixtures.TestBase):
             Column("decorated_date_data", Decorated),
         )
 
-    @testing.requires.datetime_implicit_bound
-    def test_select_direct(self, connection):
-        result = connection.scalar(select(literal(self.data)))
-        eq_(result, self.data)
-
     def test_round_trip(self, connection):
         date_table = self.tables.date_table
 
@@ -531,6 +526,11 @@ class DateTimeTest(_DateFixture, fixtures.TablesTest):
     datatype = DateTime
     data = datetime.datetime(2012, 10, 15, 12, 57, 18)
 
+    @testing.requires.datetime_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class DateTimeTZTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("datetime_timezone",)
@@ -540,6 +540,11 @@ class DateTimeTZTest(_DateFixture, fixtures.TablesTest):
         2012, 10, 15, 12, 57, 18, tzinfo=datetime.timezone.utc
     )
 
+    @testing.requires.datetime_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("datetime_microseconds",)
@@ -566,6 +571,11 @@ class TimeTest(_DateFixture, fixtures.TablesTest):
     datatype = Time
     data = datetime.time(12, 57, 18)
 
+    @testing.requires.time_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class TimeTZTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("time_timezone",)
@@ -573,6 +583,11 @@ class TimeTZTest(_DateFixture, fixtures.TablesTest):
     datatype = Time(timezone=True)
     data = datetime.time(12, 57, 18, tzinfo=datetime.timezone.utc)
 
+    @testing.requires.time_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("time_microseconds",)
@@ -580,6 +595,11 @@ class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
     datatype = Time
     data = datetime.time(12, 57, 18, 396)
 
+    @testing.requires.time_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class DateTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("date",)
@@ -587,6 +607,11 @@ class DateTest(_DateFixture, fixtures.TablesTest):
     datatype = Date
     data = datetime.date(2012, 10, 15)
 
+    @testing.requires.date_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
     """this particular suite is testing that datetime parameters get
@@ -600,6 +625,11 @@ class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
     data = datetime.datetime(2012, 10, 15, 12, 57, 18)
     compare = datetime.date(2012, 10, 15)
 
+    @testing.requires.datetime_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("datetime_historic",)
@@ -607,6 +637,11 @@ class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
     datatype = DateTime
     data = datetime.datetime(1850, 11, 10, 11, 52, 35)
 
+    @testing.requires.date_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class DateHistoricTest(_DateFixture, fixtures.TablesTest):
     __requires__ = ("date_historic",)
@@ -614,6 +649,11 @@ class DateHistoricTest(_DateFixture, fixtures.TablesTest):
     datatype = Date
     data = datetime.date(1727, 4, 1)
 
+    @testing.requires.date_implicit_bound
+    def test_select_direct(self, connection):
+        result = connection.scalar(select(literal(self.data)))
+        eq_(result, self.data)
+
 
 class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase):
     __backend__ = True
index 789b0e528dc9b24fb5bf028ea6624f852e569c44..efeeee36d3b8e4bd567fa3c4d19ea0274b109d65 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -168,7 +168,7 @@ asyncmy_fallback = mysql+asyncmy://scott:tiger@127.0.0.1:3306/test?charset=utf8m
 mariadb = mariadb+mysqldb://scott:tiger@127.0.0.1:3306/test
 mariadb_connector = mariadb+mariadbconnector://scott:tiger@127.0.0.1:3306/test
 mssql = mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server
-mssql_pymssql = mssql+pymssql://scott:tiger@ms_2008
+pymssql = mssql+pymssql://scott:tiger^5HHH@mssql2017:1433/test
 docker_mssql = mssql+pyodbc://scott:tiger^5HHH@127.0.0.1:1433/test?driver=ODBC+Driver+17+for+SQL+Server
 oracle = oracle+cx_oracle://scott:tiger@oracle18c/xe
 cxoracle = oracle+cx_oracle://scott:tiger@oracle18c/xe
index b65e274455ee494fb193fc54f2e63a07c0644631..35575bc13ae5752e81783964a1b46a5c9606a7b5 100644 (file)
@@ -18,6 +18,7 @@ from sqlalchemy import String
 from sqlalchemy import Table
 from sqlalchemy import testing
 from sqlalchemy.dialects.mssql import base as mssql
+from sqlalchemy.dialects.mssql import pyodbc as mssql_pyodbc
 from sqlalchemy.testing import AssertsCompiledSQL
 from sqlalchemy.testing import config
 from sqlalchemy.testing import engines
@@ -409,7 +410,7 @@ def full_text_search_missing():
         return result.scalar() == 0
 
 
-class MatchTest(fixtures.TablesTest, AssertsCompiledSQL):
+class MatchTest(AssertsCompiledSQL, fixtures.TablesTest):
 
     __only_on__ = "mssql"
     __skip_if__ = (full_text_search_missing,)
@@ -517,6 +518,7 @@ class MatchTest(fixtures.TablesTest, AssertsCompiledSQL):
         self.assert_compile(
             matchtable.c.title.match("somstr"),
             "CONTAINS (matchtable.title, ?)",
+            dialect=mssql_pyodbc.dialect(paramstyle="qmark"),
         )
 
     def test_simple_match(self, connection):
index 867e422020e1ead46ff7310875d766a2fd573eea..cb7ed3102e12bba30ba836af300fcf625970382e 100644 (file)
@@ -815,7 +815,36 @@ class TypeRoundTripTest(
         )
         return t, (d1, t1, d2, d3)
 
-    def test_date_roundtrips(self, date_fixture, connection):
+    def test_date_roundtrips_no_offset(self, date_fixture, connection):
+        t, (d1, t1, d2, d3) = date_fixture
+        connection.execute(
+            t.insert(),
+            dict(
+                adate=d1,
+                adatetime=d2,
+                atime1=t1,
+                atime2=d2,
+            ),
+        )
+
+        row = connection.execute(t.select()).first()
+        eq_(
+            (
+                row.adate,
+                row.adatetime,
+                row.atime1,
+                row.atime2,
+            ),
+            (
+                d1,
+                d2,
+                t1,
+                d2.time(),
+            ),
+        )
+
+    @testing.skip_if("+pymssql", "offsets dont seem to work")
+    def test_date_roundtrips_w_offset(self, date_fixture, connection):
         t, (d1, t1, d2, d3) = date_fixture
         connection.execute(
             t.insert(),
@@ -855,6 +884,7 @@ class TypeRoundTripTest(
         (datetime.datetime(2007, 10, 30, 11, 2, 32)),
         argnames="date",
     )
+    @testing.skip_if("+pymssql", "unknown failures")
     def test_tz_present_or_non_in_dates(self, date_fixture, connection, date):
         t, (d1, t1, d2, d3) = date_fixture
         connection.execute(
@@ -954,6 +984,7 @@ class TypeRoundTripTest(
         id_="iaaa",
         argnames="dto_param_value, expected_offset_hours, should_fail",
     )
+    @testing.skip_if("+pymssql", "offsets dont seem to work")
     def test_datetime_offset(
         self,
         datetimeoffset_fixture,
index f6b9f18fc4245fd9b21750a232e754e9438579cc..7de90fc5cd4339924f7b8a965d994c0d19d1bc5c 100644 (file)
@@ -45,12 +45,7 @@ def conditional_sane_rowcount_warnings(
     update=False, delete=False, only_returning=False
 ):
     warnings = ()
-    if (
-        only_returning
-        and not testing.db.dialect.supports_sane_rowcount_returning
-    ) or (
-        not only_returning and not testing.db.dialect.supports_sane_rowcount
-    ):
+    if not testing.db.dialect.supports_sane_rowcount:
         if update:
             warnings += (
                 "Dialect .* does not support "
@@ -1466,7 +1461,6 @@ class ServerVersioningTest(fixtures.MappedTest):
 
         eq_(f1.version_id, 2)
 
-    @testing.requires.sane_rowcount_w_returning
     @testing.requires.updateable_autoincrement_pks
     @testing.requires.update_returning
     def test_sql_expr_w_mods_bump(self):
@@ -1636,7 +1630,6 @@ class ServerVersioningTest(fixtures.MappedTest):
             self.assert_sql_execution(testing.db, sess.flush, *statements)
 
     @testing.requires.independent_connections
-    @testing.requires.sane_rowcount_w_returning
     def test_concurrent_mod_err_expire_on_commit(self):
         sess = self._fixture()
 
@@ -1661,7 +1654,6 @@ class ServerVersioningTest(fixtures.MappedTest):
         )
 
     @testing.requires.independent_connections
-    @testing.requires.sane_rowcount_w_returning
     def test_concurrent_mod_err_noexpire_on_commit(self):
         sess = self._fixture(expire_on_commit=False)
 
index 923d98b4626a2e70a97515192c077add9091a226..9d51ae4777e9c3e684695d74250f6da4889be047 100644 (file)
@@ -966,6 +966,10 @@ class DefaultRequirements(SuiteRequirements):
             [no_support("oracle", "ORA-03001: unimplemented feature")]
         )
 
+    @property
+    def arraysize(self):
+        return skip_if("+pymssql", "DBAPI is missing this attribute")
+
     @property
     def emulated_lastrowid(self):
         """ "target dialect retrieves cursor.lastrowid or an equivalent
@@ -1187,10 +1191,52 @@ class DefaultRequirements(SuiteRequirements):
 
         return exclusions.open()
 
+    @property
+    def date_implicit_bound(self):
+        """target dialect when given a date object will bind it such
+        that the database server knows the object is a date, and not
+        a plain string.
+
+        """
+
+        # mariadbconnector works.  pyodbc we dont know, not supported in
+        # testing.
+        return exclusions.fails_on(
+            [
+                "+mysqldb",
+                "+pymysql",
+                "+asyncmy",
+                "+mysqlconnector",
+                "+cymysql",
+                "+aiomysql",
+            ]
+        )
+
+    @property
+    def time_implicit_bound(self):
+        """target dialect when given a time object will bind it such
+        that the database server knows the object is a time, and not
+        a plain string.
+
+        """
+
+        # mariadbconnector works.  pyodbc we dont know, not supported in
+        # testing.
+        return exclusions.fails_on(
+            [
+                "+mysqldb",
+                "+pymysql",
+                "+asyncmy",
+                "+mysqlconnector",
+                "+cymysql",
+                "+aiomysql",
+            ]
+        )
+
     @property
     def datetime_implicit_bound(self):
         """target dialect when given a datetime object will bind it such
-        that the database server knows the object is a datetime, and not
+        that the database server knows the object is a date, and not
         a plain string.
 
         """
@@ -1205,6 +1251,7 @@ class DefaultRequirements(SuiteRequirements):
                 "+mysqlconnector",
                 "+cymysql",
                 "+aiomysql",
+                "+pymssql",
             ]
         )
 
index 1fe1b33230df1ac47204f468acf8e52347b56277..633972b4582cdd0c453af230078e578eb0bed740 100644 (file)
@@ -984,7 +984,11 @@ class PKDefaultTest(fixtures.TestBase):
                 metadata,
                 Column(
                     "date_id",
-                    DateTime(timezone=True),
+                    # we want no tzinfo normally since pymssql doesn't do
+                    # it right now
+                    DateTime().with_variant(
+                        DateTime(timezone=True), "postgresql"
+                    ),
                     default=text("current_timestamp"),
                     primary_key=True,
                 ),
index 66584c96e1d17961d79975530b26ecd959772fcf..ba812d38b8863e33e9a3845212685ca187beebc4 100644 (file)
@@ -539,6 +539,7 @@ class CursorResultTest(fixtures.TablesTest):
             rows.append(row)
         eq_(len(rows), 2)
 
+    @testing.requires.arraysize
     def test_fetchmany_arraysize_default(self, connection):
         users = self.tables.users
 
@@ -552,6 +553,7 @@ class CursorResultTest(fixtures.TablesTest):
 
         eq_(len(rows), min(arraysize, 150))
 
+    @testing.requires.arraysize
     def test_fetchmany_arraysize_set(self, connection):
         users = self.tables.users
 
diff --git a/tox.ini b/tox.ini
index 74ea3b960aa9441dffe332efd4a925dae81eeb72..dad8a0d2a57c5315a454740fbf228e642fef785b 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -31,6 +31,7 @@ extras=
      oracle: oracle
      oracle: oracle_oracledb
      mssql: mssql
+     mssql: mssql_pymssql
 
 deps=
      pytest>=7.0.0rc1,<8
@@ -114,8 +115,8 @@ setenv=
     # py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy}
     py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver asyncmy}
 
-
     mssql: MSSQL={env:TOX_MSSQL:--db mssql}
+    py3{,7,8,9,10,11}-mssql: EXTRA_MSSQL_DRIVERS={env:EXTRA_MSSQL_DRIVERS:--dbdriver pyodbc --dbdriver pymssql}
 
     oracle,mssql,sqlite_file: IDENTS=--write-idents db_idents.txt
 
@@ -145,7 +146,7 @@ commands=
   # that flag for coverage mode.
   nocext: sh -c "rm -f lib/sqlalchemy/*.so"
 
-  {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:EXTRA_SQLITE_DRIVERS:} {env:POSTGRESQL:} {env:EXTRA_PG_DRIVERS:} {env:MYSQL:} {env:EXTRA_MYSQL_DRIVERS:} {env:ORACLE:} {env:EXTRA_ORACLE_DRIVERS:} {env:MSSQL:} {env:IDENTS:} {env:PYTEST_EXCLUDES:} {env:COVERAGE:} {posargs}
+  {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:EXTRA_SQLITE_DRIVERS:} {env:POSTGRESQL:} {env:EXTRA_PG_DRIVERS:} {env:MYSQL:} {env:EXTRA_MYSQL_DRIVERS:} {env:ORACLE:} {env:EXTRA_ORACLE_DRIVERS:} {env:MSSQL:} {env:EXTRA_MSSQL_DRIVERS:} {env:IDENTS:} {env:PYTEST_EXCLUDES:} {env:COVERAGE:} {posargs}
   oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt