--- /dev/null
+.. change::
+ :tags: bug, orm
+ :tickets: 6459
+
+ Fixed issue in the subquery loader strategy which prevented caching from
+ working correctly. This would have been seen in the logs as a "generated"
+ message instead of "cached" for all subqueryload SQL emitted; the resulting
+ flood of new cache keys would saturate the cache, degrading overall
+ performance and producing "LRU size alert" warnings.
+
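A minimal sketch of how the regression could be observed (an illustration, not part of the change; it assumes an in-memory SQLite database and ``echo=True`` SQL logging): with the fix, repeated runs of the same subqueryload query log the extra SELECT as "cached since ..." instead of "generated ..." on every pass.

from sqlalchemy import Column, ForeignKey, Integer, create_engine, select
from sqlalchemy.orm import Session, declarative_base, relationship, subqueryload

Base = declarative_base()


class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    addresses = relationship("Address")


class Address(Base):
    __tablename__ = "address"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("user.id"))


engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)

with Session(engine) as setup:
    setup.add(User(addresses=[Address()]))
    setup.commit()

for _ in range(3):
    with Session(engine) as sess:
        # prior to this fix, the subqueryload SELECT logged "[generated in ...]"
        # on every pass; with it, later passes log "[cached since ...]"
        sess.execute(
            select(User).options(subqueryload(User.addresses))
        ).scalars().all()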
else:
annotations = {
"proxy_key": self.key,
- "proxy_owner": self.class_,
+ "proxy_owner": self._parententity,
"entity_namespace": self._entity_namespace,
}
and getattr(self.class_, self.key).impl.uses_objects
)
+ @property
+ def _parententity(self):
+ return inspection.inspect(self.class_, raiseerr=False)
+
@property
def _entity_namespace(self):
if hasattr(self._comparator, "_parententity"):
# within internal loaders.
orm_key = annotations.get("proxy_key", None)
- proxy_owner = annotations.get("proxy_owner", _entity.entity)
+ proxy_owner = annotations.get("proxy_owner", _entity)
if orm_key:
- self.expr = getattr(proxy_owner, orm_key)
+ self.expr = getattr(proxy_owner.entity, orm_key)
self.translate_raw_column = False
else:
# if orm_key is not present, that means this is an ad-hoc
"parentmapper": self.mapper,
"parententity": self,
"entity_namespace": self,
- "compile_state_plugin": "orm",
}
)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
d = {
"parententity": self,
"parentmapper": self.mapper,
- "compile_state_plugin": "orm",
}
if key:
d["proxy_key"] = key
_executable_traverse_internals = [
("_with_options", InternalTraversal.dp_executable_options),
- ("_with_context_options", ExtendedInternalTraversal.dp_plain_obj),
+ (
+ "_with_context_options",
+ ExtendedInternalTraversal.dp_with_context_options,
+ ),
("_propagate_attrs", ExtendedInternalTraversal.dp_propagate_attrs),
]
These are callable functions that will
be given the CompileState object upon compilation.
- A second argument cache_args is required, which will be combined
- with the identity of the function itself in order to produce a
+ A second argument cache_args is required, which will be combined with
+ the ``__code__`` identity of the function itself in order to produce a
cache key.
"""
def __eq__(self, other):
return self.key == other.key
+ @classmethod
+ def _diff_tuples(cls, left, right):
+ ck1 = CacheKey(left, [])
+ ck2 = CacheKey(right, [])
+ return ck1._diff(ck2)
+
def _whats_different(self, other):
k1 = self.key
visit_propagate_attrs = PROPAGATE_ATTRS
+ def visit_with_context_options(
+ self, attrname, obj, parent, anon_map, bindparams
+ ):
+ return tuple((fn.__code__, c_key) for fn, c_key in obj)
+
def visit_inspectable(self, attrname, obj, parent, anon_map, bindparams):
return (attrname, inspect(obj)._gen_cache_key(anon_map, bindparams))
else:
return left == right
+ def visit_with_context_options(
+ self, attrname, left_parent, left, right_parent, right, **kw
+ ):
+ return tuple((fn.__code__, c_key) for fn, c_key in left) == tuple(
+ (fn.__code__, c_key) for fn, c_key in right
+ )
+
def visit_plain_obj(
self, attrname, left_parent, left, right_parent, right, **kw
):
dp_executable_options = symbol("EO")
+ dp_with_context_options = symbol("WC")
+
dp_fromclause_ordered_set = symbol("CO")
"""Visit an ordered set of :class:`_expression.FromClause` objects. """
import random
+import types
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy.orm import defer
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import lazyload
from sqlalchemy.orm import Load
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Query
from sqlalchemy.sql.visitors import InternalTraversal
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
+from sqlalchemy.testing import mock
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
from .inheritance import _poly_fixtures
User,
users,
properties={
- "addresses": relationship(Address, back_populates="user")
+ "addresses": relationship(
+ Address, back_populates="user", order_by=addresses.c.id
+ )
},
)
go()
else:
go()
+
+ @testing.combinations(
+ (lazyload, 2, 6),
+ (joinedload, 1, 0),
+ (selectinload, 2, 5),
+ (subqueryload, 2, 0),
+ argnames="strat,expected_stmt_cache,expected_lambda_cache",
+ )
+ def test_cache_key_loader_strategies(
+ self,
+ plain_fixture,
+ strat,
+ expected_stmt_cache,
+ expected_lambda_cache,
+ connection,
+ ):
+ User, Address = plain_fixture
+
+ cache = {}
+
+ connection = connection.execution_options(compiled_cache=cache)
+ sess = Session(connection)
+
+ with mock.patch(
+ "sqlalchemy.orm.strategies.LazyLoader._query_cache", cache
+ ), mock.patch(
+ "sqlalchemy.orm.strategies.SelectInLoader._query_cache", cache
+ ):
+
+ def go():
+ stmt = (
+ select(User)
+ .where(User.id == 7)
+ .options(strat(User.addresses))
+ )
+
+ u1 = sess.execute(stmt).scalars().first()
+ eq_(u1.addresses, [Address(id=1)])
+
+ go()
+
+ lc = len(cache)
+
+ stmt_entries = [
+ k for k in cache if not isinstance(k[0], types.CodeType)
+ ]
+ lambda_entries = [
+ k for k in cache if isinstance(k[0], types.CodeType)
+ ]
+
+ eq_(len(stmt_entries), expected_stmt_cache)
+ eq_(len(lambda_entries), expected_lambda_cache)
+
+ for i in range(3):
+ go()
+
+ eq_(len(cache), lc)
self.assert_sql_count(testing.db, go, 2)
+ def test_query_is_cached(self):
+ users, Address, addresses, User = (
+ self.tables.users,
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User,
+ )
+
+ mapper(
+ User,
+ users,
+ properties={
+ "addresses": relationship(
+ mapper(Address, addresses),
+ lazy="subquery",
+ order_by=Address.id,
+ )
+ },
+ )
+ query_cache = {}
+ sess = fixture_session()
+
+ def go():
+ sess.close()
+
+ stmt = select(User).filter(User.id == 7)
+
+ sess.execute(
+ stmt, execution_options={"compiled_cache": query_cache}
+ ).one()
+
+ for i in range(3):
+ go()
+
+ qclen = len(query_cache)
+
+ for i in range(5):
+ go()
+
+ eq_(len(query_cache), qclen)
+
def test_params_arent_cached(self):
users, Address, addresses, User = (
self.tables.users,
u1 = (
sess.query(User)
- .execution_options(query_cache=query_cache)
+ .execution_options(compiled_cache=query_cache)
.filter(User.id == 7)
.one()
)
u2 = (
sess.query(User)
- .execution_options(query_cache=query_cache)
+ .execution_options(compiled_cache=query_cache)
.filter(User.id == 8)
.one()
)
"parententity": point_mapper,
"parentmapper": point_mapper,
"proxy_key": "x_alone",
- "proxy_owner": Point,
+ "proxy_owner": point_mapper,
},
)
eq_(
"parententity": point_mapper,
"parentmapper": point_mapper,
"proxy_key": "x",
- "proxy_owner": Point,
+ "proxy_owner": point_mapper,
},
)
a2 = aliased(Point)
eq_(str(a2.x_alone == alias.x), "point_1.x = point_2.x")
+ eq_(
+ a2.x._annotations,
+ {
+ "entity_namespace": inspect(a2),
+ "parententity": inspect(a2),
+ "parentmapper": point_mapper,
+ "proxy_key": "x",
+ "proxy_owner": inspect(a2),
+ },
+ )
+
sess = fixture_session()
self.assert_compile(