--- /dev/null
+.. change::
+ :tags: bug, orm
+ :tickets: 10089
+
+    Fixed issue where setting a relationship collection directly, where an
+    object in the new collection was already present in that collection,
+    would not trigger a cascade event for that object, leading to it not
+    being added to the :class:`_orm.Session` if it were not already
+    present there. This is similar in
+ nature to :ticket:`6471` and is a more apparent issue due to the removal of
+ ``cascade_backrefs`` in the 2.0 series. The
+ :meth:`_orm.AttributeEvents.append_wo_mutation` event added as part of
+ :ticket:`6471` is now also emitted for existing members of a collection
+ that are present in a bulk set of that same collection.
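+
+    For example, given hypothetical ``Parent`` / ``Child`` mapped classes
+    where ``Parent.children`` and ``Child.parent`` form a bidirectional
+    relationship with the default ``save-update`` cascade, a bulk set that
+    includes a member already present in the collection now also cascades
+    that member into the :class:`_orm.Session` (minimal sketch)::
+
+        parent = session.get(Parent, 1)
+        existing = list(parent.children)  # collection is loaded
+
+        child = Child()
+        child.parent = parent  # backref appends to the loaded collection, but
+                               # does not place "child" in the session, as
+                               # cascade_backrefs was removed in 2.0
+        assert child not in session
+
+        # bulk set of the same collection; "child" is already a member
+        parent.children = existing + [child]
+
+        assert child in session  # cascaded as of this change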
--- /dev/null
+.. change::
+ :tags: bug, orm
+ :tickets: 10090
+
+    Fixed issue where objects that became associated with an unloaded
+    collection via backref, but were not merged into the
+    :class:`_orm.Session` due to the removal of ``cascade_backrefs`` in the
+    2.0 series, would not emit a warning that these objects were not being
+    included in a flush, even though they were pending members of the
+    collection; in other such cases, a warning is emitted when a collection
+    being flushed contains non-attached objects which will be essentially
+    discarded. The addition of the warning for backref-pending collection
+    members establishes greater consistency with collections that may be
+    loaded or unloaded, and therefore flushed or not flushed, at different
+    times depending on the relationship loading strategy in use.
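+
+    For example, given hypothetical ``Parent`` / ``Child`` mapped classes
+    with a bidirectional ``Parent.children`` / ``Child.parent`` relationship,
+    in the case where ``Parent.children`` has not been loaded, a flush now
+    warns about the backref-pending member instead of silently omitting it
+    (minimal sketch)::
+
+        parent = session.get(Parent, 1)  # Parent.children remains unloaded
+
+        child = Child()
+        child.parent = parent  # staged as a pending member of the unloaded
+                               # collection; not added to the session
+        assert child not in session
+
+        session.flush()
+        # emits a warning along the lines of:
+        # SAWarning: Object of type <Child> not in session, add operation
+        # along 'Parent.children' will not proceed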
from typing import cast
from typing import ClassVar
from typing import Dict
+from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
from .base import ATTR_WAS_SET
from .base import CALLABLES_OK
from .base import DEFERRED_HISTORY_LOAD
+from .base import INCLUDE_PENDING_MUTATIONS # noqa
from .base import INIT_OK
from .base import instance_dict as instance_dict
from .base import instance_state as instance_state
passive: PassiveFlag = PASSIVE_OFF,
) -> History:
current = self.get(state, dict_, passive=passive)
+
if current is PASSIVE_NO_RESULT:
- return HISTORY_BLANK
+ if (
+ passive & PassiveFlag.INCLUDE_PENDING_MUTATIONS
+ and self.key in state._pending_mutations
+ ):
+ pending = state._pending_mutations[self.key]
+ return pending.merge_with_history(HISTORY_BLANK)
+ else:
+ return HISTORY_BLANK
else:
+ if passive & PassiveFlag.INCLUDE_PENDING_MUTATIONS:
+                # this collection is loaded / present; there should not be
+                # any pending mutations
+ assert self.key not in state._pending_mutations
+
return History.from_collection(self, state, current)
def get_all_pending(
return bool(self.added or self.deleted)
+ def _merge(self, added: Iterable[Any], deleted: Iterable[Any]) -> History:
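+        """Return a new History with the given pending added / deleted
+        items merged into this History's added / deleted members."""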
+ return History(
+ list(self.added) + list(added),
+ self.unchanged,
+ list(self.deleted) + list(deleted),
+ )
+
def as_state(self) -> History:
return History(
[
DEFERRED_HISTORY_LOAD = 256
"""indicates special load of the previous value of an attribute"""
+ INCLUDE_PENDING_MUTATIONS = 512
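+    """indicates that pending mutations staged against a not-yet-loaded
+    collection should be included when retrieving attribute history"""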
+
# pre-packaged sets of flags used as inputs
PASSIVE_OFF = (
RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
NO_AUTOFLUSH,
NO_RAISE,
DEFERRED_HISTORY_LOAD,
+ INCLUDE_PENDING_MUTATIONS,
PASSIVE_OFF,
PASSIVE_RETURN_NO_VALUE,
PASSIVE_NO_INITIALIZE,
def __bool__(self):
return True
+ def _fire_append_wo_mutation_event_bulk(
+ self, items, initiator=None, key=NO_KEY
+ ):
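+        """Notify that each entity in ``items`` is entering the collection
+        but is already present.
+        """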
+ if not items:
+ return
+
+ if initiator is not False:
+ if self.invalidated:
+ self._warn_invalidated()
+
+ if self.empty:
+ self._reset_empty()
+
+ for item in items:
+ self.attr.fire_append_wo_mutation_event(
+ self.owner_state,
+ self.owner_state.dict,
+ item,
+ initiator,
+ key,
+ )
+
def fire_append_wo_mutation_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity is entering the collection but is already
present.
else:
return item
+ def _fire_remove_event_bulk(self, items, initiator=None, key=NO_KEY):
+ if not items:
+ return
+
+ if initiator is not False:
+ if self.invalidated:
+ self._warn_invalidated()
+
+ if self.empty:
+ self._reset_empty()
+
+ for item in items:
+ self.attr.fire_remove_event(
+ self.owner_state,
+ self.owner_state.dict,
+ item,
+ initiator,
+ key,
+ )
+
def fire_remove_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity has been removed from the collection.
appender(member, _sa_initiator=False)
if existing_adapter:
- for member in removals:
- existing_adapter.fire_remove_event(member, initiator=initiator)
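+        # members that remain in the collection across the bulk replace
+        # ("constants") still receive an append_wo_mutation event, so that
+        # cascades such as save-update take effect for them (#10089)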
+ existing_adapter._fire_append_wo_mutation_event_bulk(
+ constants, initiator=initiator
+ )
+ existing_adapter._fire_remove_event_bulk(removals, initiator=initiator)
def prepare_instrumentation(
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
- passive = attributes.PASSIVE_NO_INITIALIZE
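+            # INCLUDE_PENDING_MUTATIONS ensures that members staged against
+            # an unloaded collection via backref are visible to the flush,
+            # so that they may be warned about if not present in the
+            # Session (#10090)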
+ passive = (
+ attributes.PASSIVE_NO_INITIALIZE
+ | attributes.INCLUDE_PENDING_MUTATIONS
+ )
elif self.direction is MANYTOONE:
# here, we were hoping to optimize having to fetch many-to-one
# for history and ignore it, if there's no further cascades
# test_cascade etc. will still fail.
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
- passive = attributes.PASSIVE_OFF
+ passive = (
+ attributes.PASSIVE_OFF | attributes.INCLUDE_PENDING_MUTATIONS
+ )
for s in states:
# TODO: add a high speed method
pks_changed = self._pks_changed(uowcommit, state)
if not pks_changed or self.passive_updates:
- passive = attributes.PASSIVE_NO_INITIALIZE
+ passive = (
+ attributes.PASSIVE_NO_INITIALIZE
+ | attributes.INCLUDE_PENDING_MUTATIONS
+ )
else:
- passive = attributes.PASSIVE_OFF
+ passive = (
+ attributes.PASSIVE_OFF
+ | attributes.INCLUDE_PENDING_MUTATIONS
+ )
history = uowcommit.get_attribute_history(state, self.key, passive)
if history:
uowcommit, state
)
if need_cascade_pks:
- passive = attributes.PASSIVE_OFF
+ passive = (
+ attributes.PASSIVE_OFF
+ | attributes.INCLUDE_PENDING_MUTATIONS
+ )
else:
- passive = attributes.PASSIVE_NO_INITIALIZE
+ passive = (
+ attributes.PASSIVE_NO_INITIALIZE
+ | attributes.INCLUDE_PENDING_MUTATIONS
+ )
history = uowcommit.get_attribute_history(state, self.key, passive)
if history:
for child in history.added:
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
+ def merge_with_history(self, history: History) -> History:
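+        """Return a History that combines the given History with this
+        PendingCollection's pending added and deleted items."""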
+ return history._merge(self.added_items, self.deleted_items)
+
def append(self, value: Any) -> None:
if value in self.deleted_items:
self.deleted_items.remove(value)
sa_exc.SAWarning, "not in session", sess.flush
)
+ def test_m2o_backref_future_child_pending(self):
+ """test #10090"""
+
+ User, Address = self.classes.User, self.classes.Address
+
+ self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False)
+ with Session(testing.db, future=True) as sess:
+ u1 = User(name="u1")
+ sess.add(u1)
+ sess.flush()
+
+ a1 = Address(email_address="a1")
+ a1.user = u1
+ assert a1 not in sess
+ assert_warns_message(
+ sa_exc.SAWarning, "not in session", sess.flush
+ )
+
+ def test_m2m_backref_future_child_pending(self):
+ """test #10090"""
+
+ Item, Keyword = self.classes.Item, self.classes.Keyword
+
+ self._many_to_many_fixture(fwd=True, bkd=True)
+ with Session(testing.db, future=True) as sess:
+ i1 = Item(description="i1")
+ sess.add(i1)
+ sess.flush()
+
+ k1 = Keyword(name="k1")
+ k1.items.append(i1)
+ assert k1 not in sess
+ assert_warns_message(
+ sa_exc.SAWarning, "not in session", sess.flush
+ )
+
def test_m2o_backref_future_child_expunged(self):
User, Address = self.classes.User, self.classes.Address
@testing.combinations(
(set, "add"),
(list, "append"),
+ (list, "assign"),
(attribute_keyed_dict("key"), "__setitem__"),
(attribute_keyed_dict("key"), "setdefault"),
(attribute_keyed_dict("key"), "update_dict"),
assert b1 not in s
assert b3 not in s
- if methname == "__setitem__":
+ if methname == "assign":
+ a1.bs = [b1, b2]
+ elif methname == "__setitem__":
meth = getattr(a1.bs, methname)
meth(b1.key, b1)
meth(b2.key, b2)
from sqlalchemy.orm import relationship
from sqlalchemy.orm.attributes import instance_state
from sqlalchemy.testing import AssertsExecutionResults
+from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
# backref did not fire off when c3.parent was set.
# originally this was impacted by #3708, now does not happen
# due to backref_cascades behavior being removed
- assert c3 not in p1.children
+ # warning is new as of #10090
+ with expect_warnings(
+ r"Object of type <Child> not in session, add operation along "
+ r"'Parent.children' will not proceed"
+ ):
+ assert c3 not in p1.children
def test_enable_rel_loading_allows_backref_event(self, parent_fixture):
sess, p1, p2, c1, c2 = parent_fixture
# backref did not fire off when c3.parent was set.
# originally this was impacted by #3708, now does not happen
# due to backref_cascades behavior being removed
- assert c3 not in p1.children
+ # warning is new as of #10090
+ with expect_warnings(
+ r"Object of type <Child> not in session, add operation along "
+ r"'Parent.children' will not proceed"
+ ):
+ assert c3 not in p1.children
def test_backref_doesnt_double(self, parent_fixture):
sess, p1, p2, c1, c2 = parent_fixture
eq_(s.connection().scalar(select(func.count("*")).select_from(ht1)), 4)
h6 = H6()
+ s.add(h6)
h6.h1a = h1
h6.h1b = h1