--- /dev/null
+.. change::
+ :tags: feature, orm
+ :tickets: 8126
+
+ Added very experimental feature to the :func:`_orm.selectinload` and
+ :func:`_orm.immediateload` loader options called
+ :paramref:`_orm.selectinload.recursion_depth` /
+    :paramref:`_orm.immediateload.recursion_depth`, which allows a single
+ loader option to automatically recurse into self-referential relationships.
+    It may be set to an integer indicating depth, or to -1 to indicate that
+    loading should continue until no deeper levels are found.
+ Major internal changes to :func:`_orm.selectinload` and
+ :func:`_orm.immediateload` allow this feature to work while continuing
+ to make correct use of the compilation cache, as well as not using
+ arbitrary recursion, so any level of depth is supported (though would
+ emit that many queries). This may be useful for
+ self-referential structures that must be loaded fully eagerly, such as when
+ using asyncio.
+
+ A warning is also emitted when loader options are connected together with
+ arbitrary lengths (that is, without using the new ``recursion_depth``
+ option) when excessive recursion depth is detected in related object
+    loading. Such an operation will use large amounts of memory and perform
+    extremely poorly; the cache is disabled when this condition is detected
+    in order to protect it from being flooded with arbitrary statements.
class QueryContext:
__slots__ = (
+ "top_level_context",
"compile_state",
"query",
"params",
_refresh_state = None
_lazy_loaded_from = None
_legacy_uniquing = False
+ _sa_top_level_orm_context = None
def __init__(
self,
self.loaders_require_buffering = False
self.loaders_require_uniquing = False
self.params = params
+ self.top_level_context = load_options._sa_top_level_orm_context
self.propagated_loader_options = tuple(
# issue 7447.
self.yield_per = load_options._yield_per
self.identity_token = load_options._refresh_identity_token
+    def _get_top_level_context(self) -> QueryContext:
+        # Return the outermost QueryContext for this load chain; when this
+        # context was not spawned from another context, it is itself the
+        # top-level one.
+        return self.top_level_context or self
+
_orm_load_exec_options = util.immutabledict(
{"_result_disable_adapt_to_context": True, "future_result": True}
execution_options,
) = QueryContext.default_load_options.from_execution_options(
"_sa_orm_load_options",
- {"populate_existing", "autoflush", "yield_per"},
+ {
+ "populate_existing",
+ "autoflush",
+ "yield_per",
+ "sa_top_level_orm_context",
+ },
execution_options,
statement._execution_options,
)
-
# default execution options for ORM results:
# 1. _result_disable_adapt_to_context=True
# this will disable the ResultSetMetadata._adapt_to_context()
}
)
+ if (
+ getattr(statement._compile_options, "_current_path", None)
+ and len(statement._compile_options._current_path) > 10
+ and execution_options.get("compiled_cache", True) is not None
+ ):
+ util.warn(
+ "Loader depth for query is excessively deep; caching will "
+ "be disabled for additional loaders. Consider using the "
+ "recursion_depth feature for deeply nested recursive eager "
+ "loaders."
+ )
+ execution_options = execution_options.union(
+ {"compiled_cache": None}
+ )
+
bind_arguments["clause"] = statement
# new in 1.4 - the coercions system is leveraged to allow the
"""
context.runid = _new_runid()
- context.post_load_paths = {}
+
+ if context.top_level_context:
+ is_top_level = False
+ context.post_load_paths = context.top_level_context.post_load_paths
+ else:
+ is_top_level = True
+ context.post_load_paths = {}
compile_state = context.compile_state
filtered = compile_state._has_mapper_entities
tuple([proc(row) for proc in process]) for row in fetch
]
- for path, post_load in context.post_load_paths.items():
- post_load.invoke(context, path)
+ # if we are the originating load from a query, meaning we
+ # aren't being called as a result of a nested "post load",
+ # iterate through all the collected post loaders and fire them
+ # off. Previously this used to work recursively, however that
+ # prevented deeply nested structures from being loadable
+ if is_top_level:
+ if yield_per:
+ # if using yield per, memoize the state of the
+ # collection so that it can be restored
+ top_level_post_loads = list(
+ context.post_load_paths.items()
+ )
+
+ while context.post_load_paths:
+ post_loads = list(context.post_load_paths.items())
+ context.post_load_paths.clear()
+ for path, post_load in post_loads:
+ post_load.invoke(context, path)
+
+ if yield_per:
+ context.post_load_paths.clear()
+ context.post_load_paths.update(top_level_post_loads)
yield rows
"quick": [],
"deferred": [],
"expire": [],
- "delayed": [],
"existing": [],
"eager": [],
}
for key, populator in populators["new"]:
populator(state, dict_, row)
- for key, populator in populators["delayed"]:
- populator(state, dict_, row)
+
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
- for key, populator in populators["delayed"]:
- if key in to_load:
- populator(state, dict_, row)
+
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
if not self.states:
return
path = path_registry.PathRegistry.coerce(path)
- for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
+ for (
+ effective_context,
+ token,
+ limit_to_mapper,
+ loader,
+ arg,
+ kw,
+ ) in self.loaders.values():
states = [
(state, overwrite)
for state, overwrite in self.states.items()
if state.manager.mapper.isa(limit_to_mapper)
]
if states:
- loader(context, path, states, self.load_keys, *arg, **kw)
+ loader(
+ effective_context, path, states, self.load_keys, *arg, **kw
+ )
self.states.clear()
@classmethod
pl = context.post_load_paths[path.path]
else:
pl = context.post_load_paths[path.path] = PostLoad()
- pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
+ pl.loaders[token] = (
+ context,
+ token,
+ limit_to_mapper,
+ loader_callable,
+ arg,
+ kw,
+ )
def load_scalar_attributes(mapper, state, attribute_names, passive):
f"invalid argument for RootRegistry.__getitem__: {entity}"
)
+    def _truncate_recursive(self) -> RootRegistry:
+        # the root path contains no relationship tokens to truncate; no-op
+        return self
+
if not TYPE_CHECKING:
__getitem__ = _getitem
self._default_path_loader_key = self.prop._default_path_loader_key
self._loader_key = ("loader", self.natural_path)
+    def _truncate_recursive(self) -> PropRegistry:
+        # Trim repeated occurrences of this self-referential property from
+        # the path, so recursive loads re-use one stable path rather than a
+        # path that grows with each level of recursion.
+        earliest = None
+        # scan backwards (excluding the terminal token) recording the
+        # earliest prior occurrence of this property
+        for i, token in enumerate(reversed(self.path[:-1])):
+            if token is self.prop:
+                earliest = i
+
+        if earliest is None:
+            # property appears only once; nothing to truncate
+            return self
+        else:
+            # re-coerce the path cut off just before the earliest repeat
+            return self.coerce(self.path[0 : -(earliest + 1)])  # type: ignore
+
@property
def entity_path(self) -> AbstractEntityRegistry:
assert self.entity is not None
# self.natural_path = parent.natural_path + (entity, )
self.natural_path = self.path
+    def _truncate_recursive(self) -> AbstractEntityRegistry:
+        # delegate truncation to the parent prop registry, then re-append
+        # this entity onto the truncated path
+        return self.parent._truncate_recursive()[self.entity]
+
@property
def root_entity(self) -> _InternalEntityType[Any]:
return cast("_InternalEntityType[Any]", self.path[0])
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
+from .strategy_options import Load
from .util import _none_set
from .util import AliasedClass
from .. import event
"'%s' is not available due to lazy='%s'" % (self, lazy)
)
- def _load_for_state(self, state, passive, loadopt=None, extra_criteria=()):
+ def _load_for_state(
+ self,
+ state,
+ passive,
+ loadopt=None,
+ extra_criteria=(),
+ extra_options=(),
+ alternate_effective_path=None,
+ execution_options=util.EMPTY_DICT,
+ ):
if not state.key and (
(
not self.parent_property.load_on_pending
passive,
loadopt,
extra_criteria,
+ extra_options,
+ alternate_effective_path,
+ execution_options,
)
def _get_ident_for_use_get(self, session, state, passive):
passive,
loadopt,
extra_criteria,
+ extra_options,
+ alternate_effective_path,
+ execution_options,
):
strategy_options = util.preloaded.orm_strategy_options
use_get = self.use_get
if state.load_options or (loadopt and loadopt._extra_criteria):
- effective_path = state.load_path[self.parent_property]
+ if alternate_effective_path is None:
+ effective_path = state.load_path[self.parent_property]
+ else:
+ effective_path = alternate_effective_path[self.parent_property]
opts = state.load_options
)
stmt._with_options = opts
- else:
+ elif alternate_effective_path is None:
# this path is used if there are not already any options
# in the query, but an event may want to add them
effective_path = state.mapper._path_registry[self.parent_property]
+ else:
+ # added by immediateloader
+ effective_path = alternate_effective_path[self.parent_property]
+
+ if extra_options:
+ stmt._with_options += extra_options
stmt._compile_options += {"_current_path": effective_path}
self._invoke_raise_load(state, passive, "raise_on_sql")
return loading.load_on_pk_identity(
- session, stmt, primary_key_identity, load_options=load_options
+ session,
+ stmt,
+ primary_key_identity,
+ load_options=load_options,
+ execution_options=execution_options,
)
if self._order_by:
lazy_clause, params = self._generate_lazy_clause(state, passive)
- execution_options = {
- "_sa_orm_load_options": load_options,
- }
+ if execution_options:
+
+ execution_options = util.EMPTY_DICT.merge_with(
+ execution_options,
+ {
+ "_sa_orm_load_options": load_options,
+ },
+ )
+ else:
+ execution_options = {
+ "_sa_orm_load_options": load_options,
+ }
if (
self.key in state.dict
__slots__ = ()
-    def _check_recursive_postload(self, context, path, join_depth=None):
+    def _setup_for_recursion(self, context, path, loadopt, join_depth=None):
+        # Replaces _check_recursive_postload; decides whether this post
+        # loader should run for the given path, returning a tuple of
+        # (effective_path, run_loader, execution_options, recursion_depth).
+
         effective_path = (
             context.compile_state.current_path or orm_util.PathRegistry.root
         ) + path
+        # propagate the originating QueryContext through execution options
+        # so nested loads share a single set of post-load paths
+        top_level_context = context._get_top_level_context()
+        execution_options = util.immutabledict(
+            {"sa_top_level_orm_context": top_level_context}
+        )
+
+        if loadopt:
+            recursion_depth = loadopt.local_opts.get("recursion_depth", None)
+            unlimited_recursion = recursion_depth == -1
+        else:
+            recursion_depth = None
+            unlimited_recursion = False
+
+        if recursion_depth is not None:
+            if not self.parent_property._is_self_referential:
+                raise sa_exc.InvalidRequestError(
+                    f"recursion_depth option on relationship "
+                    f"{self.parent_property} not valid for "
+                    "non-self-referential relationship"
+                )
+            # each recursive invocation decrements the remaining depth in
+            # the execution options; pick up the current remaining value
+            recursion_depth = context.execution_options.get(
+                f"_recursion_depth_{id(self)}", recursion_depth
+            )
+
+            if not unlimited_recursion and recursion_depth < 0:
+                # depth exhausted; signal that the loader should not run
+                return (
+                    effective_path,
+                    False,
+                    execution_options,
+                    recursion_depth,
+                )
+
+            if not unlimited_recursion:
+                execution_options = execution_options.union(
+                    {
+                        f"_recursion_depth_{id(self)}": recursion_depth - 1,
+                    }
+                )
+
+        # if a post load for this property already exists at this path,
+        # don't schedule another one
         if loading.PostLoad.path_exists(
             context, effective_path, self.parent_property
         ):
-            return True
+            return effective_path, False, execution_options, recursion_depth

         path_w_prop = path[self.parent_property]
         effective_path_w_prop = effective_path[self.parent_property]

         if not path_w_prop.contains(context.attributes, "loader"):
             if join_depth:
                 # honor the strategy-level join_depth cutoff
                 if effective_path_w_prop.length / 2 > join_depth:
-                    return True
+                    return (
+                        effective_path,
+                        False,
+                        execution_options,
+                        recursion_depth,
+                    )
             elif effective_path_w_prop.contains_mapper(self.mapper):
-                return True
+                return (
+                    effective_path,
+                    False,
+                    execution_options,
+                    recursion_depth,
+                )
-        return False
+        return effective_path, True, execution_options, recursion_depth
def _immediateload_create_row_processor(
self,
adapter,
populators,
):
- def load_immediate(state, dict_, row):
- state.get_impl(self.key).get(state, dict_, flags)
- if self._check_recursive_postload(context, path):
+ (
+ effective_path,
+ run_loader,
+ execution_options,
+ recursion_depth,
+ ) = self._setup_for_recursion(context, path, loadopt)
+ if not run_loader:
# this will not emit SQL and will only emit for a many-to-one
# "use get" load. the "_RELATED" part means it may return
# instance even if its expired, since this is a mutually-recursive
else:
flags = attributes.PASSIVE_OFF | PassiveFlag.NO_RAISE
- populators["delayed"].append((self.key, load_immediate))
+ loading.PostLoad.callable_for_path(
+ context,
+ effective_path,
+ self.parent,
+ self.parent_property,
+ self._load_for_path,
+ loadopt,
+ flags,
+ recursion_depth,
+ execution_options,
+ )
+
+    def _load_for_path(
+        self,
+        context,
+        path,
+        states,
+        load_only,
+        loadopt,
+        flags,
+        recursion_depth,
+        execution_options,
+    ):
+        # Post-load callable for immediateload: load the related attribute
+        # for each state using the lazy ("select") strategy, optionally
+        # re-applying a recursed loader option for the next level deep.
+
+        if recursion_depth:
+            # build a fresh Load that chains the current option to a
+            # recursed copy of itself, so the next level of objects also
+            # receives an immediateload
+            new_opt = Load(loadopt.path.entity)
+            new_opt.context = (
+                loadopt,
+                loadopt._recurse(),
+            )
+            # truncate the repeated self-referential path so the statement
+            # cache sees a stable path at every recursion level
+            alternate_effective_path = path._truncate_recursive()
+            extra_options = (new_opt,)
+        else:
+            new_opt = None
+            alternate_effective_path = path
+            extra_options = ()
+
+        key = self.key
+        lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
+        for state, overwrite in states:
+            dict_ = state.dict
+
+            # don't clobber an already-populated attribute unless asked to
+            if overwrite or key not in dict_:
+                value = lazyloader._load_for_state(
+                    state,
+                    flags,
+                    extra_options=extra_options,
+                    alternate_effective_path=alternate_effective_path,
+                    execution_options=execution_options,
+                )
+                state.get_impl(key).set_committed_value(state, dict_, value)
@log.class_logger
subq_path = subq_path + path
rewritten_path = rewritten_path + path
- # if not via query option, check for
- # a cycle
- # TODO: why is this here??? this is now handled
- # by the _check_recursive_postload call
- if not path.contains(compile_state.attributes, "loader"):
- if self.join_depth:
- if (
- (
- compile_state.current_path.length
- if compile_state.current_path
- else 0
- )
- + path.length
- ) / 2 > self.join_depth:
- return
- elif subq_path.contains_mapper(self.mapper):
- return
-
# use the current query being invoked, not the compile state
# one. this is so that we get the current parameters. however,
# it means we can't use the existing compile state, we have to make
adapter,
populators,
)
- # the subqueryloader does a similar check in setup_query() unlike
- # the other post loaders, however we have this here for consistency
- elif self._check_recursive_postload(context, path, self.join_depth):
+
+ _, run_loader, _, _ = self._setup_for_recursion(
+ context, path, loadopt, self.join_depth
+ )
+ if not run_loader:
return
- elif not isinstance(context.compile_state, ORMSelectCompileState):
+
+ if not isinstance(context.compile_state, ORMSelectCompileState):
# issue 7505 - subqueryload() in 1.3 and previous would silently
# degrade for from_statement() without warning. this behavior
# is restored here
adapter,
populators,
)
- elif self._check_recursive_postload(context, path, self.join_depth):
+
+ (
+ effective_path,
+ run_loader,
+ execution_options,
+ recursion_depth,
+ ) = self._setup_for_recursion(
+ context, path, loadopt, join_depth=self.join_depth
+ )
+ if not run_loader:
return
if not self.parent.class_manager[self.key].impl.supports_population:
elif not orm_util._entity_isa(path[-1], self.parent):
return
- selectin_path = (
- context.compile_state.current_path or orm_util.PathRegistry.root
- ) + path
+ selectin_path = effective_path
path_w_prop = path[self.parent_property]
self._load_for_path,
effective_entity,
loadopt,
+ recursion_depth,
+ execution_options,
)
def _load_for_path(
- self, context, path, states, load_only, effective_entity, loadopt
+ self,
+ context,
+ path,
+ states,
+ load_only,
+ effective_entity,
+ loadopt,
+ recursion_depth,
+ execution_options,
):
if load_only and self.key not in load_only:
return
),
)
+ if recursion_depth is not None:
+ effective_path = effective_path._truncate_recursive()
+
q = q.options(*new_options)._update_compile_options(
{"_current_path": effective_path}
)
+
if user_defined_options:
q = q.options(*user_defined_options)
if query_info.load_only_child:
self._load_via_child(
- our_states, none_states, query_info, q, context
+ our_states,
+ none_states,
+ query_info,
+ q,
+ context,
+ execution_options,
)
else:
- self._load_via_parent(our_states, query_info, q, context)
+ self._load_via_parent(
+ our_states, query_info, q, context, execution_options
+ )
- def _load_via_child(self, our_states, none_states, query_info, q, context):
+ def _load_via_child(
+ self,
+ our_states,
+ none_states,
+ query_info,
+ q,
+ context,
+ execution_options,
+ ):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
for key in chunk
]
},
+ execution_options=execution_options,
).unique()
}
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
- def _load_via_parent(self, our_states, query_info, q, context):
+ def _load_via_parent(
+ self, our_states, query_info, q, context, execution_options
+ ):
uselist = self.uselist
_empty_result = () if uselist else None
data = collections.defaultdict(list)
for k, v in itertools.groupby(
context.session.execute(
- q, params={"primary_keys": primary_keys}
+ q,
+ params={"primary_keys": primary_keys},
+ execution_options=execution_options,
).unique(),
lambda x: x[0],
):
return self._set_relationship_strategy(attr, {"lazy": "subquery"})
def selectinload(
- self: Self_AbstractLoad, attr: _AttrType
+ self: Self_AbstractLoad,
+ attr: _AttrType,
+ recursion_depth: Optional[int] = None,
) -> Self_AbstractLoad:
"""Indicate that the given attribute should be loaded using
SELECT IN eager loading.
query(Order).options(
lazyload(Order.items).selectinload(Item.keywords))
- .. versionadded:: 1.2
+ :param recursion_depth: optional int; when set to a positive integer
+ in conjunction with a self-referential relationship,
+ indicates "selectin" loading will continue that many levels deep
+ automatically until no items are found.
+
+ .. note:: The :paramref:`_orm.selectinload.recursion_depth` option
+ currently supports only self-referential relationships. There
+ is not yet an option to automatically traverse recursive structures
+ with more than one relationship involved.
+
+ .. warning:: This parameter is new and experimental and should be
+ treated as "alpha" status
+
+ .. versionadded:: 2.0 added
+ :paramref:`_orm.selectinload.recursion_depth`
+
.. seealso::
:ref:`selectin_eager_loading`
"""
- return self._set_relationship_strategy(attr, {"lazy": "selectin"})
+ return self._set_relationship_strategy(
+ attr,
+ {"lazy": "selectin"},
+ opts={"recursion_depth": recursion_depth},
+ )
def lazyload(
self: Self_AbstractLoad, attr: _AttrType
return self._set_relationship_strategy(attr, {"lazy": "select"})
def immediateload(
- self: Self_AbstractLoad, attr: _AttrType
+ self: Self_AbstractLoad,
+ attr: _AttrType,
+ recursion_depth: Optional[int] = None,
) -> Self_AbstractLoad:
"""Indicate that the given attribute should be loaded using
an immediate load with a per-attribute SELECT statement.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
+ :param recursion_depth: optional int; when set to a positive integer
+ in conjunction with a self-referential relationship,
+ indicates "selectin" loading will continue that many levels deep
+ automatically until no items are found.
+
+ .. note:: The :paramref:`_orm.immediateload.recursion_depth` option
+ currently supports only self-referential relationships. There
+ is not yet an option to automatically traverse recursive structures
+ with more than one relationship involved.
+
+ .. warning:: This parameter is new and experimental and should be
+ treated as "alpha" status
+
+ .. versionadded:: 2.0 added
+ :paramref:`_orm.immediateload.recursion_depth`
+
+
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
- loader = self._set_relationship_strategy(attr, {"lazy": "immediate"})
+ loader = self._set_relationship_strategy(
+ attr,
+ {"lazy": "immediate"},
+ opts={"recursion_depth": recursion_depth},
+ )
return loader
def noload(self: Self_AbstractLoad, attr: _AttrType) -> Self_AbstractLoad:
if wildcard_key is _RELATIONSHIP_TOKEN:
self.path = load_element.path
self.context += (load_element,)
+
+ # this seems to be effective for selectinloader,
+ # giving the extra match to one more level deep.
+ # but does not work for immediateloader, which still
+ # must add additional options at load time
+ if load_element.local_opts.get("recursion_depth", False):
+ r1 = load_element._recurse()
+ self.context += (r1,)
+
return self
def __getstate__(self):
self._shallow_copy_to(s)
return s
+    def _update_opts(self, **kw: Any) -> _LoadElement:
+        # return a copy of this element with the given local_opts merged in
+        new = self._clone()
+        new.local_opts = new.local_opts.union(kw)
+        return new
+
def __getstate__(self) -> Dict[str, Any]:
d = self._shallow_to_dict()
d["path"] = self.path.serialize()
def __init__(self) -> None:
raise NotImplementedError()
- def _prepend_path_from(self, parent):
+    def _recurse(self) -> _LoadElement:
+        # Return a clone whose path repeats the final two path tokens,
+        # extending a recursive loader option one additional level deep.
+        cloned = self._clone()
+        cloned.path = PathRegistry.coerce(self.path[:] + self.path[-2:])
+
+        return cloned
+
+ def _prepend_path_from(
+ self, parent: Union[Load, _LoadElement]
+ ) -> _LoadElement:
"""adjust the path of this :class:`._LoadElement` to be
a subpath of that of the given parent :class:`_orm.Load` object's
path.
@loader_unbound_fn
-def selectinload(*keys: _AttrType) -> _AbstractLoad:
-    return _generate_from_keys(Load.selectinload, keys, False, {})
+def selectinload(
+    *keys: _AttrType, recursion_depth: Optional[int] = None
+) -> _AbstractLoad:
+    # standalone (unbound) form of Load.selectinload; forwards the new
+    # recursion_depth option through the generated loader criteria
+    return _generate_from_keys(
+        Load.selectinload, keys, False, {"recursion_depth": recursion_depth}
+    )
@loader_unbound_fn
@loader_unbound_fn
-def immediateload(*keys: _AttrType) -> _AbstractLoad:
-    return _generate_from_keys(Load.immediateload, keys, False, {})
+def immediateload(
+    *keys: _AttrType, recursion_depth: Optional[int] = None
+) -> _AbstractLoad:
+    # standalone (unbound) form of Load.immediateload; forwards the new
+    # recursion_depth option through the generated loader criteria
+    return _generate_from_keys(
+        Load.immediateload, keys, False, {"recursion_depth": recursion_depth}
+    )
@loader_unbound_fn
return self.assert_sql_execution(db, callable_, *newrules)
     def assert_sql_count(self, db, callable_, count):
+        # now returns the callable's result, so tests can assert on both
+        # the emitted statement count and the objects that were loaded
-        self.assert_sql_execution(
+        return self.assert_sql_execution(
             db, callable_, assertsql.CountStatements(count)
         )
--- /dev/null
+import sqlalchemy as sa
+from sqlalchemy import ForeignKey
+from sqlalchemy import Integer
+from sqlalchemy import select
+from sqlalchemy import String
+from sqlalchemy import testing
+from sqlalchemy.orm import immediateload
+from sqlalchemy.orm import relationship
+from sqlalchemy.orm import selectinload
+from sqlalchemy.orm import Session
+from sqlalchemy.testing import eq_
+from sqlalchemy.testing import expect_raises_message
+from sqlalchemy.testing import expect_warnings
+from sqlalchemy.testing import fixtures
+from sqlalchemy.testing.fixtures import fixture_session
+from sqlalchemy.testing.schema import Column
+from sqlalchemy.testing.schema import Table
+from test.orm import _fixtures
+
+
+class NonRecursiveTest(_fixtures.FixtureTest):
+    """recursion_depth must be rejected for non-self-referential
+    relationships, for both selectinload and immediateload."""
+
+    @classmethod
+    def setup_mappers(cls):
+        cls._setup_stock_mapping()
+
+    @testing.combinations(selectinload, immediateload, argnames="loader")
+    def test_no_recursion_depth_non_self_referential(self, loader):
+        User = self.classes.User
+
+        sess = fixture_session()
+
+        # use the parametrized loader strategy; this was previously
+        # hard-coded to selectinload(), so the immediateload combination
+        # was not actually being exercised
+        stmt = select(User).options(
+            loader(User.addresses, recursion_depth=-1)
+        )
+        with expect_raises_message(
+            sa.exc.InvalidRequestError,
+            "recursion_depth option on relationship User.addresses not valid",
+        ):
+            sess.execute(stmt).all()
+
+
+class _NodeTest:
+    """Mixin providing the self-referential ``nodes`` table and ``Node``
+    mapping used by the recursive loader tests."""
+
+    @classmethod
+    def define_tables(cls, metadata):
+        # adjacency-list table: parent_id points back at nodes.id
+        Table(
+            "nodes",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("parent_id", Integer, ForeignKey("nodes.id")),
+            Column("data", String(30)),
+        )
+
+    @classmethod
+    def setup_mappers(cls):
+
+        nodes = cls.tables.nodes
+        Node = cls.classes.Node
+
+        # self-referential relationship, as required by recursion_depth
+        cls.mapper_registry.map_imperatively(
+            Node,
+            nodes,
+            properties={"children": relationship(Node)},
+        )
+
+    @classmethod
+    def setup_classes(cls):
+        class Node(cls.Comparable):
+            def append(self, node):
+                self.children.append(node)
+
+
+class ShallowRecursiveTest(_NodeTest, fixtures.MappedTest):
+    """Tests recursion_depth=-1 against a small, branching node tree."""
+
+    @classmethod
+    def insert_data(cls, connection):
+        Node = cls.classes.Node
+        n1 = Node(data="n1")
+        n1.append(Node(data="n11"))
+        n1.append(Node(data="n12"))
+        n1.append(Node(data="n13"))
+
+        n1.children[0].children = [Node(data="n111"), Node(data="n112")]
+
+        n1.children[1].append(Node(data="n121"))
+        n1.children[1].append(Node(data="n122"))
+        n1.children[1].append(Node(data="n123"))
+        n2 = Node(data="n2")
+        n2.append(Node(data="n21"))
+        n2.children[0].append(Node(data="n211"))
+        n2.children[0].append(Node(data="n212"))
+
+        with Session(connection) as sess:
+            sess.add(n1)
+            sess.add(n2)
+            sess.commit()
+
+    @testing.fixture
+    def data_fixture(self):
+        # returns a callable that loads the two root nodes from a session
+        Node = self.classes.Node
+
+        def go(sess):
+            n1, n2 = sess.scalars(
+                select(Node)
+                .where(Node.data.in_(["n1", "n2"]))
+                .order_by(Node.id)
+            ).all()
+            return n1, n2
+
+        return go
+
+    def _full_structure(self):
+        # expected comparison structure; Comparable compares only the
+        # attributes supplied to each template Node
+        Node = self.classes.Node
+        return [
+            Node(
+                data="n1",
+                children=[
+                    Node(data="n11"),
+                    Node(
+                        data="n12",
+                        children=[
+                            Node(data="n121"),
+                            Node(data="n122"),
+                            Node(data="n123"),
+                        ],
+                    ),
+                    Node(data="n13"),
+                ],
+            ),
+            Node(
+                data="n2",
+                children=[
+                    Node(
+                        data="n21",
+                        children=[
+                            Node(data="n211"),
+                            Node(data="n212"),
+                        ],
+                    )
+                ],
+            ),
+        ]
+
+    @testing.combinations(
+        (selectinload, 4),
+        (immediateload, 14),
+        argnames="loader,expected_sql_count",
+    )
+    def test_recursion_depth_opt(
+        self, data_fixture, loader, expected_sql_count
+    ):
+        # expected statement counts differ per strategy, per the
+        # combinations above
+        Node = self.classes.Node
+
+        sess = fixture_session()
+        n1, n2 = data_fixture(sess)
+
+        def go():
+            return (
+                sess.query(Node)
+                .filter(Node.data.in_(["n1", "n2"]))
+                .options(loader(Node.children, recursion_depth=-1))
+                .order_by(Node.data)
+                .all()
+            )
+
+        result = self.assert_sql_count(testing.db, go, expected_sql_count)
+        sess.close()
+
+        eq_(result, self._full_structure())
+
+
+class DeepRecursiveTest(_NodeTest, fixtures.MappedTest):
+    """Tests recursion_depth against a 200-level-deep chain of nodes,
+    including assertions on compiled-cache growth."""
+
+    @classmethod
+    def insert_data(cls, connection):
+        # single chain: node i is the child of node i - 1
+        nodes = cls.tables.nodes
+        connection.execute(
+            nodes.insert(),
+            [
+                {"id": i, "parent_id": i - 1 if i > 1 else None}
+                for i in range(1, 201)
+            ],
+        )
+        connection.commit()
+
+    @testing.fixture
+    def limited_cache_conn(self, connection):
+        # yields a callable that records an upper bound and returns the
+        # connection; on teardown, asserts the compiled cache was used but
+        # stayed under that bound
+
+        connection.engine._compiled_cache.clear()
+
+        assert_limit = 0
+
+        def go(limit):
+            nonlocal assert_limit
+            assert_limit = limit
+            return connection
+
+        yield go
+
+        clen = len(connection.engine._compiled_cache)
+
+        # make sure we used the cache
+        assert clen > 1
+
+        # make sure it didn't grow much. current top is 6, as the loaders
+        # seem to generate a few times, i think there is some artifact
+        # in the cache key gen having to do w/ other things being memoized
+        # or not that causes it to generate a different cache key a few times,
+        # should figure out and document what that is
+        assert clen < assert_limit, f"cache grew to {clen}"
+
+    def _stack_loaders(self, loader_fn, depth):
+        # manually chain ``depth`` additional loader options onto one
+        # another, i.e. without using recursion_depth
+        Node = self.classes.Node
+
+        opt = loader_fn(Node.children)
+
+        while depth:
+            opt = getattr(opt, loader_fn.__name__)(Node.children)
+            depth -= 1
+        return opt
+
+    def _assert_depth(self, obj, depth):
+        # walk depth + 1 nodes down the chain; anything left on the stack
+        # afterwards must not have its "children" collection loaded
+        stack = [obj]
+        depth += 1
+
+        while stack and depth:
+            n = stack.pop(0)
+            stack.extend(n.__dict__["children"])
+            depth -= 1
+
+        for n in stack:
+            assert "children" not in n.__dict__
+
+    @testing.combinations(selectinload, immediateload, argnames="loader_fn")
+    @testing.combinations(1, 15, 25, 185, 78, argnames="depth")
+    def test_recursion_depth(self, loader_fn, depth, limited_cache_conn):
+        connection = limited_cache_conn(6)
+        Node = self.classes.Node
+
+        # run twice so the second pass exercises the compiled cache
+        for i in range(2):
+            stmt = (
+                select(Node)
+                .filter(Node.id == 1)
+                .options(loader_fn(Node.children, recursion_depth=depth))
+            )
+            with Session(connection) as s:
+                result = s.scalars(stmt)
+                self._assert_depth(result.one(), depth)
+
+    @testing.combinations(selectinload, immediateload, argnames="loader_fn")
+    def test_unlimited_recursion(self, loader_fn, limited_cache_conn):
+        connection = limited_cache_conn(6)
+        Node = self.classes.Node
+
+        for i in range(2):
+            stmt = (
+                select(Node)
+                .filter(Node.id == 1)
+                .options(loader_fn(Node.children, recursion_depth=-1))
+            )
+            with Session(connection) as s:
+                result = s.scalars(stmt)
+                # -1 means load the full 200-node chain
+                self._assert_depth(result.one(), 200)
+
+    @testing.combinations(selectinload, immediateload, argnames="loader_fn")
+    @testing.combinations(4, 9, 12, 25, 41, 55, argnames="depth")
+    def test_warning_w_no_recursive_opt(
+        self, loader_fn, depth, limited_cache_conn
+    ):
+        connection = limited_cache_conn(27)
+
+        Node = self.classes.Node
+
+        for i in range(2):
+            stmt = (
+                select(Node)
+                .filter(Node.id == 1)
+                .options(self._stack_loaders(loader_fn, depth))
+            )
+
+            # note this is a magic number, it's not important that it's exact,
+            # just that when someone makes a huge recursive thing,
+            # it warns
+            if depth > 8:
+                with expect_warnings(
+                    "Loader depth for query is excessively deep; "
+                    "caching will be disabled for additional loaders."
+                ):
+                    with Session(connection) as s:
+                        result = s.scalars(stmt)
+                        self._assert_depth(result.one(), depth)
+            else:
+                with Session(connection) as s:
+                    result = s.scalars(stmt)
+                    self._assert_depth(result.one(), depth)
+
+
+# TODO:
+# we should do another set of tests using Node -> Edge -> Node
Column("data", String(30)),
)
- def test_basic(self):
- nodes = self.tables.nodes
-
- class Node(fixtures.ComparableEntity):
+ @classmethod
+ def setup_classes(cls):
+ class Node(cls.Comparable):
def append(self, node):
self.children.append(node)
+ @testing.fixture
+ def data_fixture(self):
+ def go(sess):
+ Node = self.classes.Node
+ n1 = Node(data="n1")
+ n1.append(Node(data="n11"))
+ n1.append(Node(data="n12"))
+ n1.append(Node(data="n13"))
+
+ n1.children[0].children = [Node(data="n111"), Node(data="n112")]
+
+ n1.children[1].append(Node(data="n121"))
+ n1.children[1].append(Node(data="n122"))
+ n1.children[1].append(Node(data="n123"))
+ n2 = Node(data="n2")
+ n2.append(Node(data="n21"))
+ n2.children[0].append(Node(data="n211"))
+ n2.children[0].append(Node(data="n212"))
+ sess.add(n1)
+ sess.add(n2)
+ sess.flush()
+ sess.expunge_all()
+ return n1, n2
+
+ return go
+
+ def _full_structure(self):
+ Node = self.classes.Node
+ return [
+ Node(
+ data="n1",
+ children=[
+ Node(data="n11"),
+ Node(
+ data="n12",
+ children=[
+ Node(data="n121"),
+ Node(data="n122"),
+ Node(data="n123"),
+ ],
+ ),
+ Node(data="n13"),
+ ],
+ ),
+ Node(
+ data="n2",
+ children=[
+ Node(
+ data="n21",
+ children=[
+ Node(data="n211"),
+ Node(data="n212"),
+ ],
+ )
+ ],
+ ),
+ ]
+
+ def test_basic(self, data_fixture):
+ nodes = self.tables.nodes
+
+ Node = self.classes.Node
+
self.mapper_registry.map_imperatively(
Node,
nodes,
},
)
sess = fixture_session()
- n1 = Node(data="n1")
- n1.append(Node(data="n11"))
- n1.append(Node(data="n12"))
- n1.append(Node(data="n13"))
- n1.children[1].append(Node(data="n121"))
- n1.children[1].append(Node(data="n122"))
- n1.children[1].append(Node(data="n123"))
- n2 = Node(data="n2")
- n2.append(Node(data="n21"))
- n2.children[0].append(Node(data="n211"))
- n2.children[0].append(Node(data="n212"))
-
- sess.add(n1)
- sess.add(n2)
- sess.flush()
- sess.expunge_all()
+ n1, n2 = data_fixture(sess)
def go():
d = (
.all()
)
eq_(
- [
- Node(
- data="n1",
- children=[
- Node(data="n11"),
- Node(
- data="n12",
- children=[
- Node(data="n121"),
- Node(data="n122"),
- Node(data="n123"),
- ],
- ),
- Node(data="n13"),
- ],
- ),
- Node(
- data="n2",
- children=[
- Node(
- data="n21",
- children=[
- Node(data="n211"),
- Node(data="n212"),
- ],
- )
- ],
- ),
- ],
+ self._full_structure(),
d,
)
self.assert_sql_count(testing.db, go, 4)
- def test_lazy_fallback_doesnt_affect_eager(self):
+ def test_lazy_fallback_doesnt_affect_eager(self, data_fixture):
nodes = self.tables.nodes
-
- class Node(fixtures.ComparableEntity):
- def append(self, node):
- self.children.append(node)
+ Node = self.classes.Node
self.mapper_registry.map_imperatively(
Node,
},
)
sess = fixture_session()
- n1 = Node(data="n1")
- n1.append(Node(data="n11"))
- n1.append(Node(data="n12"))
- n1.append(Node(data="n13"))
- n1.children[0].append(Node(data="n111"))
- n1.children[0].append(Node(data="n112"))
- n1.children[1].append(Node(data="n121"))
- n1.children[1].append(Node(data="n122"))
- n1.children[1].append(Node(data="n123"))
- sess.add(n1)
- sess.flush()
- sess.expunge_all()
+ n1, n2 = data_fixture(sess)
def go():
allnodes = sess.query(Node).order_by(Node.data).all()
self.assert_sql_count(testing.db, go, 2)
- def test_with_deferred(self):
+ def test_with_deferred(self, data_fixture):
nodes = self.tables.nodes
-
- class Node(fixtures.ComparableEntity):
- def append(self, node):
- self.children.append(node)
+ Node = self.classes.Node
self.mapper_registry.map_imperatively(
Node,
},
)
sess = fixture_session()
- n1 = Node(data="n1")
- n1.append(Node(data="n11"))
- n1.append(Node(data="n12"))
- sess.add(n1)
- sess.flush()
- sess.expunge_all()
+ n1, n2 = data_fixture(sess)
def go():
eq_(
- Node(data="n1", children=[Node(data="n11"), Node(data="n12")]),
+ Node(
+ data="n1",
+ children=[
+ Node(data="n11"),
+ Node(data="n12"),
+ Node(data="n13"),
+ ],
+ ),
sess.query(Node).order_by(Node.id).first(),
)
- self.assert_sql_count(testing.db, go, 6)
+ self.assert_sql_count(testing.db, go, 8)
sess.expunge_all()
def go():
eq_(
- Node(data="n1", children=[Node(data="n11"), Node(data="n12")]),
+ Node(
+ data="n1",
+ children=[
+ Node(data="n11"),
+ Node(data="n12"),
+ Node(data="n13"),
+ ],
+ ),
sess.query(Node)
.options(undefer(Node.data))
.order_by(Node.id)
.first(),
)
- self.assert_sql_count(testing.db, go, 5)
+ self.assert_sql_count(testing.db, go, 7)
sess.expunge_all()
def go():
eq_(
- Node(data="n1", children=[Node(data="n11"), Node(data="n12")]),
+ Node(
+ data="n1",
+ children=[
+ Node(data="n11"),
+ Node(data="n12"),
+ Node(data="n13"),
+ ],
+ ),
sess.query(Node)
.options(
undefer(Node.data),
.first(),
)
- self.assert_sql_count(testing.db, go, 3)
+ self.assert_sql_count(testing.db, go, 4)
- def test_options(self):
+ def test_options(self, data_fixture):
nodes = self.tables.nodes
-
- class Node(fixtures.ComparableEntity):
- def append(self, node):
- self.children.append(node)
+ Node = self.classes.Node
self.mapper_registry.map_imperatively(
Node,
properties={"children": relationship(Node, order_by=nodes.c.id)},
)
sess = fixture_session()
- n1 = Node(data="n1")
- n1.append(Node(data="n11"))
- n1.append(Node(data="n12"))
- n1.append(Node(data="n13"))
- n1.children[1].append(Node(data="n121"))
- n1.children[1].append(Node(data="n122"))
- n1.children[1].append(Node(data="n123"))
- sess.add(n1)
- sess.flush()
- sess.expunge_all()
+ n1, n2 = data_fixture(sess)
def go():
d = (
self.assert_sql_count(testing.db, go, 3)
- def test_no_depth(self):
+ def test_no_depth(self, data_fixture):
"""no join depth is set, so no eager loading occurs."""
nodes = self.tables.nodes
-
- class Node(fixtures.ComparableEntity):
- def append(self, node):
- self.children.append(node)
+ Node = self.classes.Node
self.mapper_registry.map_imperatively(
Node,
properties={"children": relationship(Node, lazy="selectin")},
)
sess = fixture_session()
- n1 = Node(data="n1")
- n1.append(Node(data="n11"))
- n1.append(Node(data="n12"))
- n1.append(Node(data="n13"))
- n1.children[1].append(Node(data="n121"))
- n1.children[1].append(Node(data="n122"))
- n1.children[1].append(Node(data="n123"))
- n2 = Node(data="n2")
- n2.append(Node(data="n21"))
- sess.add(n1)
- sess.add(n2)
- sess.flush()
- sess.expunge_all()
+ n1, n2 = data_fixture(sess)
def go():
d = (
# /home/classic/dev/sqlalchemy/test/profiles.txt
# This file is written out on a per-environment basis.
-# For each test in aaa_profiling, the corresponding function and
+# For each test in aaa_profiling, the corresponding function and
# environment is located within this file. If it doesn't exist,
# the test is skipped.
-# If a callcount does exist, it is compared to what we received.
+# If a callcount does exist, it is compared to what we received.
# assertions are raised if the counts do not match.
-#
-# To add a new callcount test, apply the function_call_count
-# decorator and re-run the tests using the --write-profiles
+#
+# To add a new callcount test, apply the function_call_count
+# decorator and re-run the tests using the --write-profiles
# option - this file will be rewritten including the new count.
-#
+#
# TEST: test.aaa_profiling.test_compiler.CompileTest.test_insert
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53330
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 63740
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53630
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 63940
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 49435
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 59745
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 51830
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 62140
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53335
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61745
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 56130
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 64540
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 52435
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 60845
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 55130
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 63540
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 45035
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 48345
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 47330
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 50640
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 48335
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 56145
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 50830
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 58640
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 47435
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 55245
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 49830
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 57640
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 33805
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 37005
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 35605
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 38805
# TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 32905
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 36105
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 34605
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 37805
# TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 3479
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 3479
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 3599
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 3599
# TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 5529
-test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 5529
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 5527
+test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 5527
# TEST: test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching
-test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 124
-test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 124
+test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 128
+test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 128
# TEST: test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching
-test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 124
-test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 124
+test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 128
+test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 128
# TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15313
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 26332
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15324
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 26343
# TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 21377
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 26396
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 21378
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 26397
# TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 10504
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 10654
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 10854
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 11004
# TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join
# TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 4254
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 4404
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 4204
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 4354
# TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 98506
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 104006
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 101006
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 106756
# TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 96844
-test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 102344
-
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 99074
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 104824
# TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results
# TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results_integrated
-# wow first time ever decreasing a value, woop. not sure why though
-test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results_integrated x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 28587,1014,96450
+test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results_integrated x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 29440,1011,95853
+test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results_integrated x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 29847,1195,114253
# TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 22984
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 22984
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 23981
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 23981
# TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 106870
-test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 115127
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 110202
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 118459
# TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 20030
-test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 21434
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 20432
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 21842
# TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load
-test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1366
-test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1455
+test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1413
+test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1502
# TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103,18
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103,18
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 104,19
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 104,19
# TEST: test.aaa_profiling.test_orm.QueryTest.test_query_cols
-test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 6167
-test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6987
+test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 6442
+test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 7262
# TEST: test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results
-test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 259205
-test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 278405
+test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 266105
+test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 288405
# TEST: test.aaa_profiling.test_orm.SessionTest.test_expire_lots
-test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1252
-test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1260
+test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1212
+test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1212
# TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect