--- /dev/null
+.. change::
+ :tags: bug, orm
+ :tickets: 8862
+
+ Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the
+ current loaded contents of relationship attributes that were indicated with
+ the :paramref:`_orm.relationship.viewonly` parameter, thus defeating
+ strategies that use :meth:`_orm.Session.merge` to pull fully loaded objects
+ from caches and other similar techniques. In a related change, fixed issue
+ where an object that contains a loaded relationship that was nonetheless
+ configured as ``lazy='raise'`` on the mapping would fail when passed to
+ :meth:`_orm.Session.merge`; checks for "raise" are now suspended within
+ the merge process assuming the :paramref:`_orm.Session.merge.load`
+ parameter remains at its default of ``True``.
+
+ Overall, this is a behavioral adjustment to a change introduced in the 1.4
+ series as of :ticket:`4994`, which took "merge" out of the set of cascades
+ applied by default to "viewonly" relationships. As "viewonly" relationships
+ aren't persisted under any circumstances, allowing their contents to
+ transfer during "merge" does not impact the persistence behavior of the
+ target object. This allows :meth:`_orm.Session.merge` to correctly suit one
+ of its use cases, that of adding objects to a :class:`.Session` that were
+ loaded elsewhere, often for the purposes of restoring from a cache.
+
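For reviewers, a minimal sketch of the cache-restore pattern this entry is aimed at; the ``User``/``Order`` mapping, the ``current_orders`` viewonly collection and the in-memory SQLite engine are hypothetical, chosen only to illustrate the behavior:

    from sqlalchemy import create_engine, ForeignKey, select
    from sqlalchemy.orm import (
        DeclarativeBase,
        Mapped,
        mapped_column,
        relationship,
        selectinload,
        Session,
    )

    class Base(DeclarativeBase):
        pass

    class User(Base):
        __tablename__ = "user_account"

        id: Mapped[int] = mapped_column(primary_key=True)
        name: Mapped[str]

        # viewonly collection; with this change its loaded contents now
        # survive Session.merge() instead of being discarded
        current_orders: Mapped[list["Order"]] = relationship(viewonly=True)

    class Order(Base):
        __tablename__ = "orders"

        id: Mapped[int] = mapped_column(primary_key=True)
        user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as sess:
        sess.add(User(id=1, name="u1"))
        sess.add_all([Order(id=1, user_id=1), Order(id=2, user_id=1)])
        sess.commit()

    # "cache" a fully loaded object outside of any Session
    with Session(engine) as sess:
        cached = sess.scalars(
            select(User).options(selectinload(User.current_orders))
        ).one()

    # restore from the cache: the loaded viewonly collection is carried
    # over onto the merged instance rather than being thrown away; per the
    # entry above this also no longer fails if the relationship is mapped
    # with lazy="raise"
    with Session(engine) as sess:
        restored = sess.merge(cached)
        assert "current_orders" in restored.__dict__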
self.dispatch.bulk_replace(state, new_values, evt, keys=new_keys)
- old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
+ # propagate NO_RAISE in passive through to the get() for the
+ # existing object (ticket #8862)
+ old = self.get(
+ state,
+ dict_,
+ passive=PASSIVE_ONLY_PERSISTENT ^ (passive & PassiveFlag.NO_RAISE),
+ )
if old is PASSIVE_NO_RESULT:
old = self._default_value(state, dict_)
elif old is orig_iterable:
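The passive arithmetic in the new call above is easy to misread; as a rough illustration using a toy ``IntFlag`` (stand-in values only, not the real ``PassiveFlag`` constants), the XOR against the masked incoming value adds ``NO_RAISE`` to ``PASSIVE_ONLY_PERSISTENT`` only when the caller's flags already carry it, as the ``PASSIVE_MERGE`` symbol introduced below does:

    from enum import IntFlag

    class Toy(IntFlag):
        # stand-in bits; the real ones live on sqlalchemy.orm.base.PassiveFlag
        NON_PERSISTENT_OK = 1
        CALLABLES_OK = 2
        SQL_OK = 4
        NO_RAISE = 8

    PASSIVE_OFF = Toy.NON_PERSISTENT_OK | Toy.CALLABLES_OK | Toy.SQL_OK
    PASSIVE_ONLY_PERSISTENT = PASSIVE_OFF ^ Toy.NON_PERSISTENT_OK
    PASSIVE_MERGE = PASSIVE_OFF | Toy.NO_RAISE

    # a caller that did not ask for NO_RAISE leaves the flags unchanged
    assert (
        PASSIVE_ONLY_PERSISTENT ^ (PASSIVE_OFF & Toy.NO_RAISE)
        == PASSIVE_ONLY_PERSISTENT
    )

    # a caller passing PASSIVE_MERGE carries NO_RAISE through to the
    # inner get(), so a lazy="raise" attribute will not raise mid-merge
    assert Toy.NO_RAISE in (
        PASSIVE_ONLY_PERSISTENT ^ (PASSIVE_MERGE & Toy.NO_RAISE)
    )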
PASSIVE_ONLY_PERSISTENT = PASSIVE_OFF ^ NON_PERSISTENT_OK
"PASSIVE_OFF ^ NON_PERSISTENT_OK"
+ PASSIVE_MERGE = PASSIVE_OFF | NO_RAISE
+ """PASSIVE_OFF | NO_RAISE
+
+ Symbol used specifically for session.merge() and similar cases
+
+ """
+
(
NO_CHANGE,
PASSIVE_NO_FETCH,
PASSIVE_NO_FETCH_RELATED,
PASSIVE_ONLY_PERSISTENT,
+ PASSIVE_MERGE,
) = PassiveFlag.__members__.values()
DEFAULT_MANAGER_ATTR = "_sa_class_manager"
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
- dest_state.get_impl(self.key).get(dest_state, dest_dict)
+ dest_state.get_impl(self.key).get(
+ dest_state, dest_dict, passive=PassiveFlag.PASSIVE_MERGE
+ )
dest_list = []
for current in instances_iterable:
else:
dest_impl = dest_state.get_impl(self.key)
assert is_has_collection_adapter(dest_impl)
- dest_impl.set(dest_state, dest_dict, dest_list, _adapt=False)
+ dest_impl.set(
+ dest_state,
+ dest_dict,
+ dest_list,
+ _adapt=False,
+ passive=PassiveFlag.PASSIVE_MERGE,
+ )
else:
current = source_dict[self.key]
if current is not None:
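As a rough, public-API level illustration of what the pre-load comment above is aiming for (re-using the hypothetical ``engine``/``User`` mapping and the ``cached`` object from the sketch near the top; this is not the internal code path itself):

    from sqlalchemy.orm import Session

    s2 = Session(engine)

    # the merge target is already persistent in the destination Session
    target = s2.get(User, 1)

    # merge() pre-loads the destination collection once, so that each
    # element of the incoming viewonly collection resolves against s2's
    # identity map instead of being fetched with one SELECT per element
    restored = s2.merge(cached)
    assert restored is target
    assert "current_orders" in restored.__dict__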
)
_allowed_cascades = all_cascades
- _viewonly_cascades = ["expunge", "all", "none", "refresh-expire"]
+ _viewonly_cascades = ["expunge", "all", "none", "refresh-expire", "merge"]
__slots__ = (
"save_update",
({"delete"}, {"none"}),
(
{"all, delete-orphan"},
- {"refresh-expire", "expunge"},
+ {"refresh-expire", "expunge", "merge"},
),
({"save-update, expunge"}, {"expunge"}),
)
not_in(o1, sess)
not_in(o2, sess)
- def test_default_merge_cascade(self):
+ @testing.combinations(
+ "persistent", "pending", argnames="collection_status"
+ )
+ def test_default_merge_cascade(self, collection_status):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
Order(id=2, user_id=1, description="someotherorder"),
)
- u1.orders.append(o1)
- u1.orders.append(o2)
+ if collection_status == "pending":
+ # technically this is pointless; one should not be appending
+ # to this collection
+ u1.orders.append(o1)
+ u1.orders.append(o2)
+ elif collection_status == "persistent":
+ sess.add(u1)
+ sess.flush()
+ sess.add_all([o1, o2])
+ sess.flush()
+ u1.orders  # trigger a load of the collection
+ else:
+ assert False
u1 = sess.merge(u1)
- assert not u1.orders
+ # in 1.4, as of #4993 this was asserting that u1.orders would
+ # not be present in the new object. However, as observed during
+ # #8862, this defeats schemes that seek to restore fully loaded
+ # objects from caches which may even have lazy="raise", but
+ # in any case would want to not emit new SQL on those collections.
+ # so we assert here that u1.orders is in fact present
+ assert "orders" in u1.__dict__
+ assert u1.__dict__["orders"]
+ assert u1.orders
def test_default_cascade(self):
User, Order, orders, users = (
},
)
- eq_(umapper.attrs["orders"].cascade, set())
+ eq_(umapper.attrs["orders"].cascade, {"merge"})
class CollectionCascadesNoBackrefTest(fixtures.TestBase):
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import PickleType
+from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy.orm import defer
from sqlalchemy.orm import deferred
from sqlalchemy.orm import foreign
+from sqlalchemy.orm import joinedload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import not_in
+from sqlalchemy.testing.assertsql import CountStatements
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
except sa.exc.InvalidRequestError as e:
assert "load=False option does not support" in str(e)
+ @testing.combinations("viewonly", "normal", argnames="viewonly")
+ @testing.combinations("load", "noload", argnames="load")
+ @testing.combinations("select", "raise", "raise_on_sql", argnames="lazy")
+ @testing.combinations(
+ "merge_persistent", "merge_detached", argnames="merge_persistent"
+ )
+ @testing.combinations("detached", "persistent", argnames="detach_original")
+ @testing.combinations("o2m", "m2o", argnames="direction")
+ def test_relationship_population_maintained(
+ self,
+ viewonly,
+ load,
+ lazy,
+ merge_persistent,
+ direction,
+ detach_original,
+ ):
+ """test #8862"""
+
+ User, Address = self.classes("User", "Address")
+ users, addresses = self.tables("users", "addresses")
+
+ self.mapper_registry.map_imperatively(
+ User,
+ users,
+ properties={
+ "addresses": relationship(
+ Address,
+ viewonly=viewonly == "viewonly",
+ lazy=lazy,
+ back_populates="user",
+ )
+ },
+ )
+
+ self.mapper_registry.map_imperatively(
+ Address,
+ addresses,
+ properties={
+ "user": relationship(
+ User,
+ viewonly=viewonly == "viewonly",
+ lazy=lazy,
+ back_populates="addresses",
+ )
+ },
+ )
+
+ s = fixture_session()
+
+ u1 = User(id=1, name="u1")
+ s.add(u1)
+ s.flush()
+ s.add_all(
+ [Address(user_id=1, email_address="e%d" % i) for i in range(1, 4)]
+ )
+ s.commit()
+
+ if direction == "o2m":
+ cls_to_merge = User
+ obj_to_merge = (
+ s.scalars(select(User).options(joinedload(User.addresses)))
+ .unique()
+ .one()
+ )
+ attrname = "addresses"
+
+ elif direction == "m2o":
+ cls_to_merge = Address
+ obj_to_merge = (
+ s.scalars(
+ select(Address)
+ .filter_by(email_address="e1")
+ .options(joinedload(Address.user))
+ )
+ .unique()
+ .one()
+ )
+ attrname = "user"
+ else:
+ assert False
+
+ assert attrname in obj_to_merge.__dict__
+
+ s2 = Session(testing.db)
+
+ if merge_persistent == "merge_persistent":
+ target_persistent = s2.get(cls_to_merge, obj_to_merge.id) # noqa
+
+ if detach_original == "detached":
+ s.expunge(obj_to_merge)
+
+ with self.sql_execution_asserter(testing.db) as assert_:
+ merged_object = s2.merge(obj_to_merge, load=load == "load")
+
+ assert_.assert_(
+ CountStatements(
+ 0
+ if load == "noload"
+ else 1
+ if merge_persistent == "merge_persistent"
+ else 2
+ )
+ )
+
+ assert attrname in merged_object.__dict__
+
+ with self.sql_execution_asserter(testing.db) as assert_:
+ if direction == "o2m":
+ eq_(
+ merged_object.addresses,
+ [
+ Address(user_id=1, email_address="e%d" % i)
+ for i in range(1, 4)
+ ],
+ )
+ elif direction == "m2o":
+ eq_(merged_object.user, User(id=1, name="u1"))
+ assert_.assert_(CountStatements(0))
+
def test_synonym(self):
users = self.tables.users