git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
cascade fixes revealed by the removal of cascade_backrefs
author    Mike Bayer <mike_mp@zzzcomputing.com>
          Tue, 11 Jul 2023 13:58:22 +0000 (09:58 -0400)
committer Mike Bayer <mike_mp@zzzcomputing.com>
          Tue, 11 Jul 2023 15:34:04 +0000 (11:34 -0400)
Fixed issue where setting a relationship collection directly, where an
object in the new collection was already present, would not trigger a
cascade event for that object, leading to it not being added to the
:class:`_orm.Session` if it were not already present there.  This is
similar in nature to :ticket:`6471` and is a more apparent issue due to
the removal of ``cascade_backrefs`` in the 2.0 series.  The
:meth:`_orm.AttributeEvents.append_wo_mutation` event added as part of
:ticket:`6471` is now also emitted for existing members of a collection
that are present in a bulk set of that same collection.
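
A minimal, runnable sketch of this first scenario (the ``A`` / ``B`` mapping,
the in-memory SQLite engine and the ``expunge()`` step are illustrative
assumptions for the sake of the example, not part of this commit)::

    from typing import List, Optional

    from sqlalchemy import ForeignKey, create_engine
    from sqlalchemy.orm import (
        DeclarativeBase,
        Mapped,
        Session,
        mapped_column,
        relationship,
    )


    class Base(DeclarativeBase):
        pass


    class A(Base):
        __tablename__ = "a"
        id: Mapped[int] = mapped_column(primary_key=True)
        bs: Mapped[List["B"]] = relationship()


    class B(Base):
        __tablename__ = "b"
        id: Mapped[int] = mapped_column(primary_key=True)
        a_id: Mapped[Optional[int]] = mapped_column(ForeignKey("a.id"))


    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as sess:
        a1, b1 = A(), B()
        a1.bs.append(b1)
        sess.add(a1)      # save-update cascade pulls b1 into the session
        sess.expunge(b1)  # b1 stays in a1.bs but is no longer in the session

        b2 = B()
        a1.bs = [b1, b2]  # bulk set; b1 is an existing member, b2 is new

        # append_wo_mutation is now emitted for b1 as well, so both objects
        # are cascaded into the session; previously only b2 was
        print(b1 in sess, b2 in sess)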

Fixed issue where objects that were associated with an unloaded collection
via backref, but were not merged into the :class:`_orm.Session` due to the
removal of ``cascade_backrefs`` in the 2.0 series, would not emit a warning
that these objects were not being included in a flush, even though they
were pending members of the collection; in other such cases, a warning is
emitted when a collection being flushed contains non-attached objects which
will be essentially discarded.  The addition of the warning for
backref-pending collection members establishes greater consistency with
collections that may or may not be present, and may or may not be flushed,
at different times depending on the relationship loading strategy in
use.
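
A minimal sketch of the second scenario; the ``Parent`` / ``Child`` mapping
and the engine setup are likewise illustrative assumptions (the names merely
echo those used in ``test/orm/test_load_on_fks.py`` below)::

    from typing import List, Optional

    from sqlalchemy import ForeignKey, create_engine
    from sqlalchemy.orm import (
        DeclarativeBase,
        Mapped,
        Session,
        mapped_column,
        relationship,
    )


    class Base(DeclarativeBase):
        pass


    class Parent(Base):
        __tablename__ = "parent"
        id: Mapped[int] = mapped_column(primary_key=True)
        children: Mapped[List["Child"]] = relationship(back_populates="parent")


    class Child(Base):
        __tablename__ = "child"
        id: Mapped[int] = mapped_column(primary_key=True)
        parent_id: Mapped[Optional[int]] = mapped_column(ForeignKey("parent.id"))
        parent: Mapped[Optional["Parent"]] = relationship(
            back_populates="children"
        )


    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as sess:
        p1 = Parent()
        sess.add(p1)
        sess.flush()    # p1 is persistent; p1.children has not been loaded

        c1 = Child()
        c1.parent = p1  # backref queues c1 onto the unloaded collection, but
                        # without cascade_backrefs c1 is not added to the session
        print(c1 in sess)  # False

        # the flush now warns, e.g. "Object of type <Child> not in session,
        # add operation along 'Parent.children' will not proceed", instead of
        # silently discarding the pending c1
        sess.flush()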

Fixes: #10089
Fixes: #10090
Change-Id: I95610c17745f32141475cc6b4a4a5c6fc678fd05

doc/build/changelog/unreleased_20/10089.rst [new file with mode: 0644]
doc/build/changelog/unreleased_20/10090.rst [new file with mode: 0644]
lib/sqlalchemy/orm/attributes.py
lib/sqlalchemy/orm/base.py
lib/sqlalchemy/orm/collections.py
lib/sqlalchemy/orm/dependency.py
lib/sqlalchemy/orm/state.py
test/orm/test_cascade.py
test/orm/test_load_on_fks.py
test/orm/test_mapper.py

diff --git a/doc/build/changelog/unreleased_20/10089.rst b/doc/build/changelog/unreleased_20/10089.rst
new file mode 100644 (file)
index 0000000..1444215
--- /dev/null
+++ b/doc/build/changelog/unreleased_20/10089.rst
@@ -0,0 +1,13 @@
+.. change::
+    :tags: bug, orm
+    :tickets: 10089
+
+    Fixed issue where setting a relationship collection directly, where an
+    object in the new collection was already present, would not trigger a
+    cascade event for that object, leading to it not being added to the
+    :class:`_orm.Session` if it were not already present there.  This is
+    similar in nature to :ticket:`6471` and is a more apparent issue due to
+    the removal of ``cascade_backrefs`` in the 2.0 series.  The
+    :meth:`_orm.AttributeEvents.append_wo_mutation` event added as part of
+    :ticket:`6471` is now also emitted for existing members of a collection
+    that are present in a bulk set of that same collection.
diff --git a/doc/build/changelog/unreleased_20/10090.rst b/doc/build/changelog/unreleased_20/10090.rst
new file mode 100644 (file)
index 0000000..a6a6cab
--- /dev/null
+++ b/doc/build/changelog/unreleased_20/10090.rst
@@ -0,0 +1,15 @@
+.. change::
+    :tags: bug, orm
+    :tickets: 10090
+
+    Fixed issue where objects that were associated with an unloaded collection
+    via backref, but were not merged into the :class:`_orm.Session` due to the
+    removal of ``cascade_backrefs`` in the 2.0 series, would not emit a warning
+    that these objects were not being included in a flush, even though they
+    were pending members of the collection; in other such cases, a warning is
+    emitted when a collection being flushed contains non-attached objects which
+    will be essentially discarded.  The addition of the warning for
+    backref-pending collection members establishes greater consistency with
+    collections that may or may not be present, and may or may not be flushed,
+    at different times depending on the relationship loading strategy in
+    use.
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py
index b82f1bc699ac31d0f531a1452f62780c901e8223..6a9766c6f70b791d9e99cc8994eff6d2109354ce 100644 (file)
@@ -24,6 +24,7 @@ from typing import Callable
 from typing import cast
 from typing import ClassVar
 from typing import Dict
+from typing import Iterable
 from typing import List
 from typing import NamedTuple
 from typing import Optional
@@ -44,6 +45,7 @@ from .base import ATTR_EMPTY
 from .base import ATTR_WAS_SET
 from .base import CALLABLES_OK
 from .base import DEFERRED_HISTORY_LOAD
+from .base import INCLUDE_PENDING_MUTATIONS  # noqa
 from .base import INIT_OK
 from .base import instance_dict as instance_dict
 from .base import instance_state as instance_state
@@ -1678,9 +1680,22 @@ class CollectionAttributeImpl(HasCollectionAdapter, AttributeImpl):
         passive: PassiveFlag = PASSIVE_OFF,
     ) -> History:
         current = self.get(state, dict_, passive=passive)
+
         if current is PASSIVE_NO_RESULT:
-            return HISTORY_BLANK
+            if (
+                passive & PassiveFlag.INCLUDE_PENDING_MUTATIONS
+                and self.key in state._pending_mutations
+            ):
+                pending = state._pending_mutations[self.key]
+                return pending.merge_with_history(HISTORY_BLANK)
+            else:
+                return HISTORY_BLANK
         else:
+            if passive & PassiveFlag.INCLUDE_PENDING_MUTATIONS:
+                # this collection is loaded / present.  should not be any
+                # pending mutations
+                assert self.key not in state._pending_mutations
+
             return History.from_collection(self, state, current)
 
     def get_all_pending(
@@ -2360,6 +2375,13 @@ class History(NamedTuple):
 
         return bool(self.added or self.deleted)
 
+    def _merge(self, added: Iterable[Any], deleted: Iterable[Any]) -> History:
+        return History(
+            list(self.added) + list(added),
+            self.unchanged,
+            list(self.deleted) + list(deleted),
+        )
+
     def as_state(self) -> History:
         return History(
             [
diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py
index 5079dfc7c53840f88bca490a6ba4d289403c3fec..ecb10591a3a68ef21bdb4033683e9571c9a3b4bc 100644 (file)
@@ -151,6 +151,8 @@ class PassiveFlag(FastIntFlag):
     DEFERRED_HISTORY_LOAD = 256
     """indicates special load of the previous value of an attribute"""
 
+    INCLUDE_PENDING_MUTATIONS = 512
+
     # pre-packaged sets of flags used as inputs
     PASSIVE_OFF = (
         RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
@@ -191,6 +193,7 @@ class PassiveFlag(FastIntFlag):
     NO_AUTOFLUSH,
     NO_RAISE,
     DEFERRED_HISTORY_LOAD,
+    INCLUDE_PENDING_MUTATIONS,
     PASSIVE_OFF,
     PASSIVE_RETURN_NO_VALUE,
     PASSIVE_NO_INITIALIZE,
diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py
index 1155fc6cf5f3f1bed8a07d6793a72feabc7dce9a..3a4964c4609250226a3addf368821821d50203bd 100644 (file)
@@ -620,6 +620,28 @@ class CollectionAdapter:
     def __bool__(self):
         return True
 
+    def _fire_append_wo_mutation_event_bulk(
+        self, items, initiator=None, key=NO_KEY
+    ):
+        if not items:
+            return
+
+        if initiator is not False:
+            if self.invalidated:
+                self._warn_invalidated()
+
+            if self.empty:
+                self._reset_empty()
+
+            for item in items:
+                self.attr.fire_append_wo_mutation_event(
+                    self.owner_state,
+                    self.owner_state.dict,
+                    item,
+                    initiator,
+                    key,
+                )
+
     def fire_append_wo_mutation_event(self, item, initiator=None, key=NO_KEY):
         """Notify that a entity is entering the collection but is already
         present.
@@ -668,6 +690,26 @@ class CollectionAdapter:
         else:
             return item
 
+    def _fire_remove_event_bulk(self, items, initiator=None, key=NO_KEY):
+        if not items:
+            return
+
+        if initiator is not False:
+            if self.invalidated:
+                self._warn_invalidated()
+
+            if self.empty:
+                self._reset_empty()
+
+            for item in items:
+                self.attr.fire_remove_event(
+                    self.owner_state,
+                    self.owner_state.dict,
+                    item,
+                    initiator,
+                    key,
+                )
+
     def fire_remove_event(self, item, initiator=None, key=NO_KEY):
         """Notify that a entity has been removed from the collection.
 
@@ -763,8 +805,10 @@ def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
             appender(member, _sa_initiator=False)
 
     if existing_adapter:
-        for member in removals:
-            existing_adapter.fire_remove_event(member, initiator=initiator)
+        existing_adapter._fire_append_wo_mutation_event_bulk(
+            constants, initiator=initiator
+        )
+        existing_adapter._fire_remove_event_bulk(removals, initiator=initiator)
 
 
 def prepare_instrumentation(
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py
index 9f57f31ca631c318c52b3c3a9cf162d8d309be8a..e941dbcbf471be78d5a214081a4ed7064245fb27 100644 (file)
@@ -231,7 +231,10 @@ class DependencyProcessor:
 
     def prop_has_changes(self, uowcommit, states, isdelete):
         if not isdelete or self.passive_deletes:
-            passive = attributes.PASSIVE_NO_INITIALIZE
+            passive = (
+                attributes.PASSIVE_NO_INITIALIZE
+                | attributes.INCLUDE_PENDING_MUTATIONS
+            )
         elif self.direction is MANYTOONE:
             # here, we were hoping to optimize having to fetch many-to-one
             # for history and ignore it, if there's no further cascades
@@ -240,7 +243,9 @@ class DependencyProcessor:
             # test_cascade etc. will still fail.
             passive = attributes.PASSIVE_NO_FETCH_RELATED
         else:
-            passive = attributes.PASSIVE_OFF
+            passive = (
+                attributes.PASSIVE_OFF | attributes.INCLUDE_PENDING_MUTATIONS
+            )
 
         for s in states:
             # TODO: add a high speed method
@@ -470,9 +475,15 @@ class OneToManyDP(DependencyProcessor):
             pks_changed = self._pks_changed(uowcommit, state)
 
             if not pks_changed or self.passive_updates:
-                passive = attributes.PASSIVE_NO_INITIALIZE
+                passive = (
+                    attributes.PASSIVE_NO_INITIALIZE
+                    | attributes.INCLUDE_PENDING_MUTATIONS
+                )
             else:
-                passive = attributes.PASSIVE_OFF
+                passive = (
+                    attributes.PASSIVE_OFF
+                    | attributes.INCLUDE_PENDING_MUTATIONS
+                )
 
             history = uowcommit.get_attribute_history(state, self.key, passive)
             if history:
@@ -1121,9 +1132,15 @@ class ManyToManyDP(DependencyProcessor):
                 uowcommit, state
             )
             if need_cascade_pks:
-                passive = attributes.PASSIVE_OFF
+                passive = (
+                    attributes.PASSIVE_OFF
+                    | attributes.INCLUDE_PENDING_MUTATIONS
+                )
             else:
-                passive = attributes.PASSIVE_NO_INITIALIZE
+                passive = (
+                    attributes.PASSIVE_NO_INITIALIZE
+                    | attributes.INCLUDE_PENDING_MUTATIONS
+                )
             history = uowcommit.get_attribute_history(state, self.key, passive)
             if history:
                 for child in history.added:
diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py
index b745fc53c2523315949c65dcf704aebe953b7978..b1ae198592fe48c5fc6baff0266a310f1cb4a0bf 100644 (file)
@@ -1122,6 +1122,9 @@ class PendingCollection:
         self.deleted_items = util.IdentitySet()
         self.added_items = util.OrderedIdentitySet()
 
+    def merge_with_history(self, history: History) -> History:
+        return history._merge(self.added_items, self.deleted_items)
+
     def append(self, value: Any) -> None:
         if value in self.deleted_items:
             self.deleted_items.remove(value)
diff --git a/test/orm/test_cascade.py b/test/orm/test_cascade.py
index 6b84ec6f7887d3fb0c6871638a57478282db5e28..fab8f5bb0c28185ccea305538823c811de113151 100644 (file)
@@ -1470,6 +1470,42 @@ class NoSaveCascadeFlushTest(_fixtures.FixtureTest):
                 sa_exc.SAWarning, "not in session", sess.flush
             )
 
+    def test_m2o_backref_future_child_pending(self):
+        """test #10090"""
+
+        User, Address = self.classes.User, self.classes.Address
+
+        self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False)
+        with Session(testing.db, future=True) as sess:
+            u1 = User(name="u1")
+            sess.add(u1)
+            sess.flush()
+
+            a1 = Address(email_address="a1")
+            a1.user = u1
+            assert a1 not in sess
+            assert_warns_message(
+                sa_exc.SAWarning, "not in session", sess.flush
+            )
+
+    def test_m2m_backref_future_child_pending(self):
+        """test #10090"""
+
+        Item, Keyword = self.classes.Item, self.classes.Keyword
+
+        self._many_to_many_fixture(fwd=True, bkd=True)
+        with Session(testing.db, future=True) as sess:
+            i1 = Item(description="i1")
+            sess.add(i1)
+            sess.flush()
+
+            k1 = Keyword(name="k1")
+            k1.items.append(i1)
+            assert k1 not in sess
+            assert_warns_message(
+                sa_exc.SAWarning, "not in session", sess.flush
+            )
+
     def test_m2o_backref_future_child_expunged(self):
         User, Address = self.classes.User, self.classes.Address
 
@@ -4551,6 +4587,7 @@ class CollectionCascadesNoBackrefTest(fixtures.TestBase):
     @testing.combinations(
         (set, "add"),
         (list, "append"),
+        (list, "assign"),
         (attribute_keyed_dict("key"), "__setitem__"),
         (attribute_keyed_dict("key"), "setdefault"),
         (attribute_keyed_dict("key"), "update_dict"),
@@ -4577,7 +4614,9 @@ class CollectionCascadesNoBackrefTest(fixtures.TestBase):
         assert b1 not in s
         assert b3 not in s
 
-        if methname == "__setitem__":
+        if methname == "assign":
+            a1.bs = [b1, b2]
+        elif methname == "__setitem__":
             meth = getattr(a1.bs, methname)
             meth(b1.key, b1)
             meth(b2.key, b2)
diff --git a/test/orm/test_load_on_fks.py b/test/orm/test_load_on_fks.py
index f33a6881d95bafe9f126b2ffb866455b2ee0b70b..b0be470afbdc732ca48c45d1d6f69f8825bff086 100644 (file)
@@ -7,6 +7,7 @@ from sqlalchemy.orm import declarative_base
 from sqlalchemy.orm import relationship
 from sqlalchemy.orm.attributes import instance_state
 from sqlalchemy.testing import AssertsExecutionResults
+from sqlalchemy.testing import expect_warnings
 from sqlalchemy.testing import fixtures
 from sqlalchemy.testing.fixtures import fixture_session
 from sqlalchemy.testing.schema import Column
@@ -259,7 +260,12 @@ class LoadOnFKsTest(fixtures.DeclarativeMappedTest):
         # backref did not fire off when c3.parent was set.
         # originally this was impacted by #3708, now does not happen
         # due to backref_cascades behavior being removed
-        assert c3 not in p1.children
+        # warning is new as of #10090
+        with expect_warnings(
+            r"Object of type <Child> not in session, add operation along "
+            r"'Parent.children' will not proceed"
+        ):
+            assert c3 not in p1.children
 
     def test_enable_rel_loading_allows_backref_event(self, parent_fixture):
         sess, p1, p2, c1, c2 = parent_fixture
@@ -274,7 +280,12 @@ class LoadOnFKsTest(fixtures.DeclarativeMappedTest):
         # backref did not fire off when c3.parent was set.
         # originally this was impacted by #3708, now does not happen
         # due to backref_cascades behavior being removed
-        assert c3 not in p1.children
+        # warning is new as of #10090
+        with expect_warnings(
+            r"Object of type <Child> not in session, add operation along "
+            r"'Parent.children' will not proceed"
+        ):
+            assert c3 not in p1.children
 
     def test_backref_doesnt_double(self, parent_fixture):
         sess, p1, p2, c1, c2 = parent_fixture
diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py
index a3aad69f0871a2e4b3b2fb5f7ca71c78154e006d..f90803d6e4de63e4ae723cc31080c3494f43a9fd 100644 (file)
@@ -2682,6 +2682,7 @@ class RequirementsTest(fixtures.MappedTest):
         eq_(s.connection().scalar(select(func.count("*")).select_from(ht1)), 4)
 
         h6 = H6()
+        s.add(h6)
         h6.h1a = h1
         h6.h1b = h1