translate joined inheritance cols in UPDATE/DELETE
author Mike Bayer <mike_mp@zzzcomputing.com>
Wed, 3 Aug 2022 20:55:23 +0000 (16:55 -0400)
committer Mike Bayer <mike_mp@zzzcomputing.com>
Fri, 5 Aug 2022 16:53:35 +0000 (12:53 -0400)
Fixed issue in ORM enabled UPDATE when the statement is created against a
joined-inheritance subclass, updating only local table columns, where the
"fetch" synchronization strategy would not render the correct RETURNING
clause for databases that use RETURNING for fetch synchronization.
Also adjusts the strategy used for RETURNING in UPDATE FROM and
DELETE FROM statements.

Also fixes MariaDB which does not support RETURNING with
DELETE..USING.  This was not caught in tests because the
"fetch" strategy wasn't tested, so also adjust the ORMDMLState
classes to look for "extra froms" first before adding
RETURNING, and add new dialect-level attributes
"update_returning_multifrom" and "delete_returning_multifrom".
A new execution option is_delete_using=True, described in the
changelog message, is added to allow the ORM to know up front
whether a given statement will require a separate SELECT for
the "fetch" strategy.

Fixes: #8344
Change-Id: I3dcdb68e6e97ab0807a573c2fdb3d53c16d063ba
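
For illustration, a minimal sketch of passing the new hint together with the
"fetch" strategy in 2.0 style; ``Person``/``Engineer`` are assumed to be
joined-inheritance mappings and ``session`` an existing Session:

    from sqlalchemy import delete

    # multi-table criteria; on MariaDB this renders DELETE..USING, which
    # cannot be combined with RETURNING, so the hint below lets the "fetch"
    # strategy pre-SELECT the matched rows instead of relying on RETURNING
    stmt = (
        delete(Engineer)
        .where(Engineer.id == Person.id)
        .where(Person.name == "e2")
        .execution_options(is_delete_using=True)
    )

    session.execute(stmt, execution_options={"synchronize_session": "fetch"})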

12 files changed:
doc/build/changelog/unreleased_20/6195_7011.rst [new file with mode: 0644]
doc/build/changelog/unreleased_20/8344.rst [new file with mode: 0644]
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/postgresql/base.py
lib/sqlalchemy/engine/default.py
lib/sqlalchemy/engine/interfaces.py
lib/sqlalchemy/orm/persistence.py
lib/sqlalchemy/orm/query.py
lib/sqlalchemy/sql/dml.py
test/orm/test_update_delete.py
test/requirements.py
test/sql/test_delete.py

diff --git a/doc/build/changelog/unreleased_20/6195_7011.rst b/doc/build/changelog/unreleased_20/6195_7011.rst
new file mode 100644 (file)
index 0000000..1e212a9
--- /dev/null
@@ -0,0 +1,18 @@
+.. change::
+    :tags: usecase, sqlite
+    :tickets: 6195
+
+    Added RETURNING support for the SQLite dialect.  SQLite supports RETURNING
+    since version 3.35.
+
+
+.. change::
+    :tags: usecase, mariadb
+    :tickets: 7011
+
+    Added INSERT..RETURNING and DELETE..RETURNING support for the MariaDB
+    dialect.  UPDATE..RETURNING is not yet supported by MariaDB.  MariaDB
+    supports INSERT..RETURNING as of 10.5.0 and DELETE..RETURNING as of
+    10.0.5.
+
+
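A quick usage sketch of the newly available syntax; the table here is a
placeholder, and RETURNING on SQLite requires a runtime SQLite library of
3.35 or greater:

    from sqlalchemy import create_engine, insert
    from sqlalchemy import Column, Integer, MetaData, String, Table

    metadata = MetaData()
    # placeholder table for illustration only
    user = Table(
        "user_account",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )

    engine = create_engine("sqlite://")  # in-memory; needs SQLite 3.35+
    metadata.create_all(engine)

    with engine.begin() as conn:
        # RETURNING hands back the generated primary key directly
        result = conn.execute(
            insert(user).values(name="spongebob").returning(user.c.id)
        )
        print(result.scalar_one())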
diff --git a/doc/build/changelog/unreleased_20/8344.rst b/doc/build/changelog/unreleased_20/8344.rst
new file mode 100644 (file)
index 0000000..d98c145
--- /dev/null
@@ -0,0 +1,44 @@
+.. change::
+    :tags: bug, orm
+    :tickets: 8344
+
+    Fixed issue in ORM enabled UPDATE when the statement is created against a
+    joined-inheritance subclass, updating only local table columns, where the
+    "fetch" synchronization strategy would not render the correct RETURNING
+    clause for databases that use RETURNING for fetch synchronization.
+    Also adjusts the strategy used for RETURNING in UPDATE FROM and
+    DELETE FROM statements.
+
+.. change::
+    :tags: usecase, mariadb
+    :tickets: 8344
+
+    Added a new execution option ``is_delete_using=True``, which is consumed
+    by the ORM when using an ORM-enabled DELETE statement in conjunction with
+    the "fetch" synchronization strategy; this option indicates that the
+    DELETE statement is expected to use multiple tables, which on MariaDB
+    is the DELETE..USING syntax.   The option then indicates that
+    RETURNING (newly implemented in SQLAlchemy 2.0 for MariaDB
+    for  :ticket:`7011`) should not be used for databases that are known
+    to not support "DELETE..USING..RETURNING" syntax, even though they
+    support "DELETE..USING", which is MariaDB's current capability.
+
+    The rationale for this option is that ORM-enabled DELETE doesn't know up
+    front whether a DELETE statement is against multiple tables until
+    compilation occurs, which is cached in any case, yet it needs to be known
+    so that a SELECT for the to-be-deleted rows can be emitted up front.
+    Instead of applying an across-the-board performance penalty for
+    all DELETE statements by proactively checking them all for this
+    relatively unusual SQL pattern, the ``is_delete_using=True`` execution
+    option is requested via a new exception message that is raised
+    within the compilation step.  This exception message is specifically
+    (and only) raised when: the statement is an ORM-enabled DELETE where the
+    "fetch" synchronization strategy has been requested; the backend is
+    MariaDB or another backend with this specific limitation; and the
+    statement has been detected during the initial compilation as one that
+    would otherwise emit "DELETE..USING..RETURNING".  By applying the
+    execution option, the ORM knows to run a SELECT up front instead.
+    A similar option is implemented for ORM-enabled UPDATE but there is not
+    currently a backend where it is needed.
+
+
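A sketch of the analogous UPDATE form mentioned above, again assuming
hypothetical joined-inheritance models and an existing ``session``; the
``is_update_from`` hint only matters for a (currently hypothetical) backend
that supports UPDATE..RETURNING but not UPDATE..FROM..RETURNING:

    from sqlalchemy import update

    stmt = (
        update(Engineer)
        .where(Engineer.id == Person.id)
        .where(Person.name == "e2")
        .values(engineer_name="e5")
        # declare up front that this is a multi-table UPDATE..FROM
        .execution_options(is_update_from=True)
    )

    session.execute(stmt, execution_options={"synchronize_session": "fetch"})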
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index e33ae4dbb686ec2de0de0515a7b7b20875d41e60..8789b8673d421b06f39af7fd5a81aa466415e109 100644 (file)
@@ -2800,6 +2800,8 @@ class MSDialect(default.DefaultDialect):
     insert_returning = True
     update_returning = True
     delete_returning = True
+    update_returning_multifrom = True
+    delete_returning_multifrom = True
 
     colspecs = {
         sqltypes.DateTime: _MSDateTime,
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 8b89cdee2063bd00548388ef27f8a76fc7857280..8be221eee27365c07f257157b862472341f87a81 100644 (file)
@@ -2924,6 +2924,8 @@ class PGDialect(default.DefaultDialect):
     update_returning = True
     delete_returning = True
     insert_returning = True
+    update_returning_multifrom = True
+    delete_returning_multifrom = True
 
     connection_characteristics = (
         default.DefaultDialect.connection_characteristics
diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py
index 4b312dcebc483a27418a89410213a1d8dca1627f..80e687c3296ac0b3100d7e6bc61123b65ea8f926 100644 (file)
@@ -143,6 +143,8 @@ class DefaultDialect(Dialect):
     insert_null_pk_still_autoincrements = False
     update_returning = False
     delete_returning = False
+    update_returning_multifrom = False
+    delete_returning_multifrom = False
     insert_returning = False
     insert_executemany_returning = False
 
diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py
index 208c4f6b08721fda5d7851123d5a1478d6ac1178..778c07592f5400cbdcc7e67b97f4c05b2176d6c7 100644 (file)
@@ -781,6 +781,13 @@ class Dialect(EventTarget):
 
     """
 
+    update_returning_multifrom: bool
+    """if the dialect supports RETURNING with UPDATE..FROM
+
+    .. versionadded:: 2.0
+
+    """
+
     delete_returning: bool
     """if the dialect supports RETURNING with DELETE
 
@@ -788,6 +795,13 @@ class Dialect(EventTarget):
 
     """
 
+    delete_returning_multifrom: bool
+    """if the dialect supports RETURNING with DELETE..FROM
+
+    .. versionadded:: 2.0
+
+    """
+
     favor_returning_over_lastrowid: bool
     """for backends that support both a lastrowid and a RETURNING insert
     strategy, favor RETURNING for simple single-int pk inserts.
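
A minimal sketch of how a dialect might declare the new flags; the dialect
class and name here are made up for illustration:

    from sqlalchemy.engine import default


    class AcmeDialect(default.DefaultDialect):
        """Hypothetical dialect illustrating the new capability flags."""

        name = "acmedb"

        # RETURNING works for single-table statements...
        update_returning = True
        delete_returning = True

        # ...but, as with MariaDB's DELETE..USING, not when extra FROM /
        # USING tables are present; the ORM "fetch" strategy then needs
        # the is_delete_using / is_update_from hint so it can pre-SELECT
        # rather than rely on RETURNING.
        update_returning_multifrom = False
        delete_returning_multifrom = False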
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index c10f4701e031e2009a2ca2fb972eb99d111b8a5e..0eae4fcf117ca10ff24d125e0b08fcc4aa4d95e1 100644 (file)
@@ -1799,6 +1799,8 @@ _EMPTY_DICT = util.immutabledict()
 class BulkUDCompileState(CompileState):
     class default_update_options(Options):
         _synchronize_session = "evaluate"
+        _is_delete_using = False
+        _is_update_from = False
         _autoflush = True
         _subject_mapper = None
         _resolved_values = _EMPTY_DICT
@@ -1809,7 +1811,15 @@ class BulkUDCompileState(CompileState):
         _refresh_identity_token = None
 
     @classmethod
-    def can_use_returning(cls, dialect: Dialect, mapper: Mapper[Any]) -> bool:
+    def can_use_returning(
+        cls,
+        dialect: Dialect,
+        mapper: Mapper[Any],
+        *,
+        is_multitable: bool = False,
+        is_update_from: bool = False,
+        is_delete_using: bool = False,
+    ) -> bool:
         raise NotImplementedError()
 
     @classmethod
@@ -1830,7 +1840,7 @@ class BulkUDCompileState(CompileState):
             execution_options,
         ) = BulkUDCompileState.default_update_options.from_execution_options(
             "_sa_orm_update_options",
-            {"synchronize_session"},
+            {"synchronize_session", "is_delete_using", "is_update_from"},
             execution_options,
             statement._execution_options,
         )
@@ -1857,7 +1867,11 @@ class BulkUDCompileState(CompileState):
             session._autoflush()
 
         statement = statement._annotate(
-            {"synchronize_session": update_options._synchronize_session}
+            {
+                "synchronize_session": update_options._synchronize_session,
+                "is_delete_using": update_options._is_delete_using,
+                "is_update_from": update_options._is_update_from,
+            }
         )
 
         # this stage of the execution is called before the do_orm_execute event
@@ -1957,6 +1971,56 @@ class BulkUDCompileState(CompileState):
 
         return return_crit
 
+    @classmethod
+    def _interpret_returning_rows(cls, mapper, rows):
+        """translate from local inherited table columns to base mapper
+        primary key columns.
+
+        Joined inheritance mappers always establish the primary key in terms of
+        the base table.   When we UPDATE a sub-table, we can only get
+        RETURNING for the sub-table's columns.
+
+        Here, we create a lookup from the local sub table's primary key
+        columns to the base table PK columns so that we can get identity
+        key values from RETURNING that's against the joined inheritance
+        sub-table.
+
+        the complexity here is to support more than one level deep of
+        inheritance, where we have to link columns to each other across
+        the inheritance hierarchy.
+
+        """
+
+        if mapper.local_table is mapper.base_mapper.local_table:
+            return rows
+
+        # this starts as a mapping of
+        # local_pk_col: local_pk_col.
+        # we will then iteratively rewrite the "value" of the dict with
+        # each successive superclass column
+        local_pk_to_base_pk = {pk: pk for pk in mapper.local_table.primary_key}
+
+        for mp in mapper.iterate_to_root():
+            if mp.inherits is None:
+                break
+            elif mp.local_table is mp.inherits.local_table:
+                continue
+
+            t_to_e = dict(mp._table_to_equated[mp.inherits.local_table])
+            col_to_col = {sub_pk: super_pk for super_pk, sub_pk in t_to_e[mp]}
+            for pk, super_ in local_pk_to_base_pk.items():
+                local_pk_to_base_pk[pk] = col_to_col[super_]
+
+        lookup = {
+            local_pk_to_base_pk[lpk]: idx
+            for idx, lpk in enumerate(mapper.local_table.primary_key)
+        }
+        primary_key_convert = [
+            lookup[bpk] for bpk in mapper.base_mapper.primary_key
+        ]
+
+        return [tuple(row[idx] for idx in primary_key_convert) for row in rows]
+
     @classmethod
     def _do_pre_synchronize_evaluate(
         cls,
@@ -2105,8 +2169,12 @@ class BulkUDCompileState(CompileState):
 
         def skip_for_returning(orm_context: ORMExecuteState) -> Any:
             bind = orm_context.session.get_bind(**orm_context.bind_arguments)
-
-            if cls.can_use_returning(bind.dialect, mapper):
+            if cls.can_use_returning(
+                bind.dialect,
+                mapper,
+                is_update_from=update_options._is_update_from,
+                is_delete_using=update_options._is_delete_using,
+            ):
                 return _result.null_result()
             else:
                 return None
@@ -2294,25 +2362,60 @@ class BulkORMUpdate(ORMDMLState, UpdateDMLState, BulkUDCompileState):
         # if we are against a lambda statement we might not be the
         # topmost object that received per-execute annotations
 
+        # do this first as we need to determine if there is
+        # UPDATE..FROM
+
+        UpdateDMLState.__init__(self, new_stmt, compiler, **kw)
+
         if compiler._annotations.get(
             "synchronize_session", None
-        ) == "fetch" and self.can_use_returning(compiler.dialect, mapper):
+        ) == "fetch" and self.can_use_returning(
+            compiler.dialect, mapper, is_multitable=self.is_multitable
+        ):
             if new_stmt._returning:
                 raise sa_exc.InvalidRequestError(
                     "Can't use synchronize_session='fetch' "
                     "with explicit returning()"
                 )
-            new_stmt = new_stmt.returning(*mapper.primary_key)
-
-        UpdateDMLState.__init__(self, new_stmt, compiler, **kw)
+            self.statement = self.statement.returning(
+                *mapper.local_table.primary_key
+            )
 
         return self
 
     @classmethod
-    def can_use_returning(cls, dialect: Dialect, mapper: Mapper[Any]) -> bool:
-        return (
+    def can_use_returning(
+        cls,
+        dialect: Dialect,
+        mapper: Mapper[Any],
+        *,
+        is_multitable: bool = False,
+        is_update_from: bool = False,
+        is_delete_using: bool = False,
+    ) -> bool:
+
+        # normal answer for "should we use RETURNING" at all.
+        normal_answer = (
             dialect.update_returning and mapper.local_table.implicit_returning
         )
+        if not normal_answer:
+            return False
+
+        # these workarounds are currently hypothetical for UPDATE,
+        # unlike DELETE where they impact MariaDB
+        if is_update_from:
+            return dialect.update_returning_multifrom
+
+        elif is_multitable and not dialect.update_returning_multifrom:
+            raise sa_exc.CompileError(
+                f'Dialect "{dialect.name}" does not support RETURNING '
+                "with UPDATE..FROM; for synchronize_session='fetch', "
+                "please add the additional execution option "
+                "'is_update_from=True' to the statement to indicate that "
+                "a separate SELECT should be used for this backend."
+            )
+
+        return True
 
     @classmethod
     def _get_crud_kv_pairs(cls, statement, kv_iterator):
@@ -2423,9 +2526,11 @@ class BulkORMUpdate(ORMDMLState, UpdateDMLState, BulkUDCompileState):
         evaluated_keys = list(update_options._value_evaluators.keys())
 
         if result.returns_rows:
+            rows = cls._interpret_returning_rows(target_mapper, result.all())
+
             matched_rows = [
                 tuple(row) + (update_options._refresh_identity_token,)
-                for row in result.all()
+                for row in rows
             ]
         else:
             matched_rows = update_options._matched_rows
@@ -2494,20 +2599,64 @@ class BulkORMDelete(ORMDMLState, DeleteDMLState, BulkUDCompileState):
         if new_crit:
             statement = statement.where(*new_crit)
 
+        # do this first as we need to determine if there is
+        # DELETE..FROM
+        DeleteDMLState.__init__(self, statement, compiler, **kw)
+
         if compiler._annotations.get(
             "synchronize_session", None
-        ) == "fetch" and self.can_use_returning(compiler.dialect, mapper):
-            statement = statement.returning(*mapper.primary_key)
-
-        DeleteDMLState.__init__(self, statement, compiler, **kw)
+        ) == "fetch" and self.can_use_returning(
+            compiler.dialect,
+            mapper,
+            is_multitable=self.is_multitable,
+            is_delete_using=compiler._annotations.get(
+                "is_delete_using", False
+            ),
+        ):
+            self.statement = statement.returning(*statement.table.primary_key)
 
         return self
 
     @classmethod
-    def can_use_returning(cls, dialect: Dialect, mapper: Mapper[Any]) -> bool:
-        return (
+    def can_use_returning(
+        cls,
+        dialect: Dialect,
+        mapper: Mapper[Any],
+        *,
+        is_multitable: bool = False,
+        is_update_from: bool = False,
+        is_delete_using: bool = False,
+    ) -> bool:
+
+        # normal answer for "should we use RETURNING" at all.
+        normal_answer = (
             dialect.delete_returning and mapper.local_table.implicit_returning
         )
+        if not normal_answer:
+            return False
+
+        # now get into special workarounds because MariaDB supports
+        # DELETE...RETURNING but not DELETE...USING...RETURNING.
+        if is_delete_using:
+            # is_delete_using hint was passed.   use
+            # additional dialect feature (True for PG, False for MariaDB)
+            return dialect.delete_returning_multifrom
+
+        elif is_multitable and not dialect.delete_returning_multifrom:
+            # is_delete_using hint was not passed, but we determined
+            # at compile time that this is in fact a DELETE..USING.
+            # it's too late to continue since we did not pre-SELECT.
+            # raise that we need that hint up front.
+
+            raise sa_exc.CompileError(
+                f'Dialect "{dialect.name}" does not support RETURNING '
+                "with DELETE..USING; for synchronize_session='fetch', "
+                "please add the additional execution option "
+                "'is_delete_using=True' to the statement to indicate that "
+                "a separate SELECT should be used for this backend."
+            )
+
+        return True
 
     @classmethod
     def _do_post_synchronize_evaluate(cls, session, result, update_options):
@@ -2524,9 +2673,11 @@ class BulkORMDelete(ORMDMLState, DeleteDMLState, BulkUDCompileState):
         target_mapper = update_options._subject_mapper
 
         if result.returns_rows:
+            rows = cls._interpret_returning_rows(target_mapper, result.all())
+
             matched_rows = [
                 tuple(row) + (update_options._refresh_identity_token,)
-                for row in result.all()
+                for row in rows
             ]
         else:
             matched_rows = update_options._matched_rows
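
The net effect of the above for joined inheritance: an UPDATE that touches
only subclass-local columns with the "fetch" strategy now RETURNs the
sub-table primary key and translates it back to the base-table identity. A
sketch of the triggering case, assuming the ``Engineer`` model used in the
tests below and an existing ``session``:

    from sqlalchemy import update

    # only the engineer-table column is updated, so RETURNING can only name
    # engineer.id; the ORM maps those values back to person.id in order to
    # build identity keys for "fetch" synchronization
    session.execute(
        update(Engineer).values(engineer_name="e5"),
        execution_options={"synchronize_session": "fetch"},
    )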
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 99131e3e9a7afdb114ebdab180ae2b9e1feae777..4fb1013c47f33d2c958f2bf4e8a35a566081f120 100644 (file)
@@ -3021,7 +3021,9 @@ class Query(
             self.session.execute(
                 delete_,
                 self._params,
-                execution_options={"synchronize_session": synchronize_session},
+                execution_options=self._execution_options.union(
+                    {"synchronize_session": synchronize_session}
+                ),
             ),
         )
         bulk_del.result = result  # type: ignore
@@ -3113,7 +3115,9 @@ class Query(
             self.session.execute(
                 upd,
                 self._params,
-                execution_options={"synchronize_session": synchronize_session},
+                execution_options=self._execution_options.union(
+                    {"synchronize_session": synchronize_session}
+                ),
             ),
         )
         bulk_ud.result = result  # type: ignore
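
With the change above, execution options set on a legacy ``Query`` now carry
through to the bulk statement it emits, so the hint can also be supplied in
1.x style (models assumed, as in the tests below):

    # legacy Query form: the option set via execution_options() reaches the
    # ORM-enabled DELETE produced by Query.delete()
    session.query(Engineer).filter(Engineer.id == Person.id).filter(
        Person.name == "e2"
    ).execution_options(is_delete_using=True).delete(
        synchronize_session="fetch"
    )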
diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py
index 76a16eb1c84f90a3148631ea72e9fc054fc9f743..68ce0d1c5d21b0cb72f3144782b67e7dd9807d7b 100644 (file)
@@ -359,6 +359,7 @@ class DeleteDMLState(DMLState):
         t, ef = self._make_extra_froms(statement)
         self._primary_table = t
         self._extra_froms = ef
+        self.is_multitable = ef
 
 
 SelfUpdateBase = typing.TypeVar("SelfUpdateBase", bound="UpdateBase")
diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py
index 933d2bb1fa47df044d6a3666ccd49f9888fc4e3a..adaed8f6f8b58834e6099cc9d120780d9b7a6588 100644 (file)
@@ -1715,8 +1715,8 @@ class UpdateDeleteFromTest(fixtures.MappedTest):
             ),
         )
 
-    @testing.requires.delete_from
-    def test_delete_from_joined_subq_test(self):
+    @testing.requires.delete_using
+    def test_delete_using_joined_subq_test(self):
         Document = self.classes.Document
         s = fixture_session()
 
@@ -1958,6 +1958,11 @@ class InheritTest(fixtures.DeclarativeMappedTest):
             id = Column(Integer, ForeignKey("person.id"), primary_key=True)
             engineer_name = Column(String(50))
 
+        class Programmer(Engineer):
+            __tablename__ = "programmer"
+            id = Column(Integer, ForeignKey("engineer.id"), primary_key=True)
+            primary_language = Column(String(50))
+
         class Manager(Person):
             __tablename__ = "manager"
             id = Column(Integer, ForeignKey("person.id"), primary_key=True)
@@ -1965,10 +1970,11 @@ class InheritTest(fixtures.DeclarativeMappedTest):
 
     @classmethod
     def insert_data(cls, connection):
-        Engineer, Person, Manager = (
+        Engineer, Person, Manager, Programmer = (
             cls.classes.Engineer,
             cls.classes.Person,
             cls.classes.Manager,
+            cls.classes.Programmer,
         )
         s = Session(connection)
         s.add_all(
@@ -1977,11 +1983,14 @@ class InheritTest(fixtures.DeclarativeMappedTest):
                 Manager(name="m1", manager_name="m1"),
                 Engineer(name="e2", engineer_name="e2"),
                 Person(name="p1"),
+                Programmer(
+                    name="pp1", engineer_name="pp1", primary_language="python"
+                ),
             ]
         )
         s.commit()
 
-    @testing.only_on("mysql", "Multi table update")
+    @testing.only_on(["mysql", "mariadb"], "Multi table update")
     def test_update_from_join_no_problem(self):
         person = self.classes.Person.__table__
         engineer = self.classes.Engineer.__table__
@@ -1998,53 +2007,253 @@ class InheritTest(fixtures.DeclarativeMappedTest):
         eq_(obj.name, "updated")
         eq_(obj.engineer_name, "e2a")
 
-    def test_update_subtable_only(self):
+    @testing.combinations(None, "fetch", "evaluate")
+    def test_update_sub_table_only(self, synchronize_session):
         Engineer = self.classes.Engineer
         s = Session(testing.db)
-        s.query(Engineer).update({"engineer_name": "e5"})
+        s.query(Engineer).update(
+            {"engineer_name": "e5"}, synchronize_session=synchronize_session
+        )
 
-        eq_(s.query(Engineer.engineer_name).all(), [("e5",), ("e5",)])
+        eq_(s.query(Engineer.engineer_name).all(), [("e5",), ("e5",), ("e5",)])
+
+    @testing.combinations(None, "fetch", "evaluate")
+    def test_update_sub_sub_table_only(self, synchronize_session):
+        Programmer = self.classes.Programmer
+        s = Session(testing.db)
+        s.query(Programmer).update(
+            {"primary_language": "c++"},
+            synchronize_session=synchronize_session,
+        )
+
+        eq_(
+            s.query(Programmer.primary_language).all(),
+            [
+                ("c++",),
+            ],
+        )
 
     @testing.requires.update_from
-    def test_update_from(self):
+    @testing.combinations(None, "fetch", "fetch_w_hint", "evaluate")
+    def test_update_from(self, synchronize_session):
+        """test an UPDATE that uses multiple tables.
+
+        The limitation that MariaDB has with DELETE does not apply here
+        at the moment as MariaDB doesn't support UPDATE..RETURNING at all.
+        However, the logic from DELETE is still implemented in
+        persistence.py. If MariaDB adds UPDATE...RETURNING, or SQLite adds
+        UPDATE..FROM, etc., then it will be useful.
+
+        """
         Engineer = self.classes.Engineer
         Person = self.classes.Person
         s = Session(testing.db)
-        s.query(Engineer).filter(Engineer.id == Person.id).filter(
-            Person.name == "e2"
-        ).update({"engineer_name": "e5"})
+
+        # we don't have any backends with this combination right now.
+        db_has_hypothetical_limitation = (
+            testing.db.dialect.update_returning
+            and not testing.db.dialect.update_returning_multifrom
+        )
+
+        e2 = s.query(Engineer).filter_by(name="e2").first()
+
+        with self.sql_execution_asserter() as asserter:
+            eq_(e2.engineer_name, "e2")
+            q = (
+                s.query(Engineer)
+                .filter(Engineer.id == Person.id)
+                .filter(Person.name == "e2")
+            )
+            if synchronize_session == "fetch_w_hint":
+                q.execution_options(is_update_from=True).update(
+                    {"engineer_name": "e5"},
+                    synchronize_session="fetch",
+                )
+            elif (
+                synchronize_session == "fetch"
+                and db_has_hypothetical_limitation
+            ):
+                with expect_raises_message(
+                    exc.CompileError,
+                    'Dialect ".*" does not support RETURNING with '
+                    "UPDATE..FROM;",
+                ):
+                    q.update(
+                        {"engineer_name": "e5"},
+                        synchronize_session=synchronize_session,
+                    )
+                return
+            else:
+                q.update(
+                    {"engineer_name": "e5"},
+                    synchronize_session=synchronize_session,
+                )
+
+            if synchronize_session is None:
+                eq_(e2.engineer_name, "e2")
+            else:
+                eq_(e2.engineer_name, "e5")
+
+        if synchronize_session in ("fetch", "fetch_w_hint") and (
+            db_has_hypothetical_limitation
+            or not testing.db.dialect.update_returning
+        ):
+            asserter.assert_(
+                CompiledSQL(
+                    "SELECT person.id FROM person INNER JOIN engineer "
+                    "ON person.id = engineer.id WHERE engineer.id = person.id "
+                    "AND person.name = %s",
+                    [{"name_1": "e2"}],
+                    dialect="mariadb",
+                ),
+                CompiledSQL(
+                    "UPDATE engineer, person SET engineer.engineer_name=%s "
+                    "WHERE engineer.id = person.id AND person.name = %s",
+                    [{"engineer_name": "e5", "name_1": "e2"}],
+                    dialect="mariadb",
+                ),
+            )
+        elif synchronize_session in ("fetch", "fetch_w_hint"):
+            asserter.assert_(
+                CompiledSQL(
+                    "UPDATE engineer SET engineer_name=%(engineer_name)s "
+                    "FROM person WHERE engineer.id = person.id "
+                    "AND person.name = %(name_1)s RETURNING engineer.id",
+                    [{"engineer_name": "e5", "name_1": "e2"}],
+                    dialect="postgresql",
+                ),
+            )
+        else:
+            asserter.assert_(
+                CompiledSQL(
+                    "UPDATE engineer SET engineer_name=%(engineer_name)s "
+                    "FROM person WHERE engineer.id = person.id "
+                    "AND person.name = %(name_1)s",
+                    [{"engineer_name": "e5", "name_1": "e2"}],
+                    dialect="postgresql",
+                ),
+            )
 
         eq_(
             set(s.query(Person.name, Engineer.engineer_name)),
-            set([("e1", "e1"), ("e2", "e5")]),
+            set([("e1", "e1"), ("e2", "e5"), ("pp1", "pp1")]),
         )
 
-    @testing.requires.delete_from
-    def test_delete_from(self):
+    @testing.requires.delete_using
+    @testing.combinations(None, "fetch", "fetch_w_hint", "evaluate")
+    def test_delete_using(self, synchronize_session):
+        """test a DELETE that uses multiple tables.
+
+        due to a limitation in MariaDB, we have an up front "hint" that needs
+        to be passed for this backend if DELETE USING is to be used in
+        conjunction with "fetch" strategy, so that we know before compilation
+        that we won't be able to use RETURNING.
+
+        """
+
         Engineer = self.classes.Engineer
         Person = self.classes.Person
         s = Session(testing.db)
-        s.query(Engineer).filter(Engineer.id == Person.id).filter(
-            Person.name == "e2"
-        ).delete()
 
+        db_has_mariadb_limitation = (
+            testing.db.dialect.delete_returning
+            and not testing.db.dialect.delete_returning_multifrom
+        )
+
+        e2 = s.query(Engineer).filter_by(name="e2").first()
+
+        with self.sql_execution_asserter() as asserter:
+
+            assert e2 in s
+
+            q = (
+                s.query(Engineer)
+                .filter(Engineer.id == Person.id)
+                .filter(Person.name == "e2")
+            )
+
+            if synchronize_session == "fetch_w_hint":
+                q.execution_options(is_delete_using=True).delete(
+                    synchronize_session="fetch"
+                )
+            elif synchronize_session == "fetch" and db_has_mariadb_limitation:
+                with expect_raises_message(
+                    exc.CompileError,
+                    'Dialect ".*" does not support RETURNING with '
+                    "DELETE..USING;",
+                ):
+                    q.delete(synchronize_session=synchronize_session)
+                return
+            else:
+                q.delete(synchronize_session=synchronize_session)
+
+            if synchronize_session is None:
+                assert e2 in s
+            else:
+                assert e2 not in s
+
+        if synchronize_session in ("fetch", "fetch_w_hint") and (
+            db_has_mariadb_limitation
+            or not testing.db.dialect.delete_returning
+        ):
+            asserter.assert_(
+                CompiledSQL(
+                    "SELECT person.id FROM person INNER JOIN engineer ON "
+                    "person.id = engineer.id WHERE engineer.id = person.id "
+                    "AND person.name = %s",
+                    [{"name_1": "e2"}],
+                    dialect="mariadb",
+                ),
+                CompiledSQL(
+                    "DELETE FROM engineer USING engineer, person WHERE "
+                    "engineer.id = person.id AND person.name = %s",
+                    [{"name_1": "e2"}],
+                    dialect="mariadb",
+                ),
+            )
+        elif synchronize_session in ("fetch", "fetch_w_hint"):
+            asserter.assert_(
+                CompiledSQL(
+                    "DELETE FROM engineer USING person WHERE "
+                    "engineer.id = person.id AND person.name = %(name_1)s "
+                    "RETURNING engineer.id",
+                    [{"name_1": "e2"}],
+                    dialect="postgresql",
+                ),
+            )
+        else:
+            asserter.assert_(
+                CompiledSQL(
+                    "DELETE FROM engineer USING person WHERE "
+                    "engineer.id = person.id AND person.name = %(name_1)s",
+                    [{"name_1": "e2"}],
+                    dialect="postgresql",
+                ),
+            )
+
+        # delete actually worked
         eq_(
             set(s.query(Person.name, Engineer.engineer_name)),
-            set([("e1", "e1")]),
+            set([("pp1", "pp1"), ("e1", "e1")]),
         )
 
-    @testing.only_on("mysql", "Multi table update")
-    def test_update_from_multitable(self):
+    @testing.only_on(["mysql", "mariadb"], "Multi table update")
+    @testing.requires.delete_using
+    @testing.combinations(None, "fetch", "evaluate")
+    def test_update_from_multitable(self, synchronize_session):
         Engineer = self.classes.Engineer
         Person = self.classes.Person
         s = Session(testing.db)
         s.query(Engineer).filter(Engineer.id == Person.id).filter(
             Person.name == "e2"
-        ).update({Person.name: "e22", Engineer.engineer_name: "e55"})
+        ).update(
+            {Person.name: "e22", Engineer.engineer_name: "e55"},
+            synchronize_session=synchronize_session,
+        )
 
         eq_(
             set(s.query(Person.name, Engineer.engineer_name)),
-            set([("e1", "e1"), ("e22", "e55")]),
+            set([("e1", "e1"), ("e22", "e55"), ("pp1", "pp1")]),
         )
 
 
diff --git a/test/requirements.py b/test/requirements.py
index c7c5beed94a1e060a49b60484dffffac64608329..c6776533c0e26c4ff66aea6c1f4bc50f3dd625a1 100644 (file)
@@ -467,11 +467,11 @@ class DefaultRequirements(SuiteRequirements):
         )
 
     @property
-    def delete_from(self):
+    def delete_using(self):
         """Target must support DELETE FROM..FROM or DELETE..USING syntax"""
         return only_on(
             ["postgresql", "mssql", "mysql", "mariadb"],
-            "Backend does not support DELETE..FROM",
+            "Backend does not support DELETE..USING or equivalent",
         )
 
     @property
diff --git a/test/sql/test_delete.py b/test/sql/test_delete.py
index 45ead811fce499d5cc8c7707d8ddbb6ef8260e7f..f98e7297d667ba27a63324222fb64ceb5f577a19 100644 (file)
@@ -307,7 +307,7 @@ class DeleteFromRoundTripTest(fixtures.TablesTest):
             ),
         )
 
-    @testing.requires.delete_from
+    @testing.requires.delete_using
     def test_exec_two_table(self, connection):
         users, addresses = self.tables.users, self.tables.addresses
         dingalings = self.tables.dingalings
@@ -326,7 +326,7 @@ class DeleteFromRoundTripTest(fixtures.TablesTest):
         ]
         self._assert_table(connection, addresses, expected)
 
-    @testing.requires.delete_from
+    @testing.requires.delete_using
     def test_exec_three_table(self, connection):
         users = self.tables.users
         addresses = self.tables.addresses
@@ -342,7 +342,7 @@ class DeleteFromRoundTripTest(fixtures.TablesTest):
         expected = [(2, 5, "ding 2/5")]
         self._assert_table(connection, dingalings, expected)
 
-    @testing.requires.delete_from
+    @testing.requires.delete_using
     def test_exec_two_table_plus_alias(self, connection):
         users, addresses = self.tables.users, self.tables.addresses
         dingalings = self.tables.dingalings
@@ -359,7 +359,7 @@ class DeleteFromRoundTripTest(fixtures.TablesTest):
         expected = [(1, 7, "x", "jack@bean.com"), (5, 9, "x", "fred@fred.com")]
         self._assert_table(connection, addresses, expected)
 
-    @testing.requires.delete_from
+    @testing.requires.delete_using
     def test_exec_alias_plus_table(self, connection):
         users, addresses = self.tables.users, self.tables.addresses
         dingalings = self.tables.dingalings