"""private module containing functions used to convert database
rows into object instances and associated state.
-the functions here are called primarily by Query, Mapper,
+the functions here are called primarily by Query and Mapper,
as well as by some of the attribute loading strategies.
"""
from __future__ import absolute_import
from .. import util
-from . import attributes, exc as orm_exc
+from . import attributes, exc as orm_exc, state as statelib
from .interfaces import EXT_CONTINUE
from ..sql import util as sql_util
from .util import _none_set, state_str
-statelib = util.importlater("sqlalchemy.orm", "state")
_new_runid = util.counter()
(process, labels) = \
zip(*[
- query_entity.row_processor(query,
+ query_entity.row_processor(query,
context, custom_rows)
for query_entity in query._entities
])
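# "process" is now a tuple of per-entity row callables and "labels" the
# matching label collections, one pair per entry in query._entities.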
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [session._merge(
- attributes.instance_state(instance),
- attributes.instance_dict(instance),
+ attributes.instance_state(instance),
+ attributes.instance_dict(instance),
load=load, _recursive={})
for instance in iterator]
else:
result = list(iterator)
else:
- mapped_entities = [i for i, e in enumerate(query._entities)
+ mapped_entities = [i for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)]
result = []
for row in iterator:
newrow = list(row)
for i in mapped_entities:
newrow[i] = session._merge(
- attributes.instance_state(newrow[i]),
- attributes.instance_dict(newrow[i]),
+ attributes.instance_state(newrow[i]),
+ attributes.instance_dict(newrow[i]),
load=load, _recursive={})
result.append(util.NamedTuple(newrow, row._labels))
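# restore the session's autoflush setting, which was saved and switched
# off for the duration of the merge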
session.autoflush = autoflush
def get_from_identity(session, key, passive):
- """Look up the given key in the given session's identity map,
+ """Look up the given key in the given session's identity map,
checking the object for expired state if found.
"""
else:
return None
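# How the two lookups are typically combined (an illustrative sketch only;
# "some_key" stands in for an identity key tuple such as (User, (5,))):
#
#     obj = get_from_identity(session, some_key, attributes.PASSIVE_OFF)
#     if obj is None:
#         obj = load_on_ident(session.query(User), some_key)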
-def load_on_ident(query, key,
+def load_on_ident(query, key,
refresh_state=None, lockmode=None,
only_load_props=None):
"""Load the given identity key from the database."""
except orm_exc.NoResultFound:
return None
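# a missing row is reported to the caller as None rather than as a
# NoResultFound exception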
-def instance_processor(mapper, context, path, adapter,
- polymorphic_from=None,
- only_load_props=None,
+def instance_processor(mapper, context, path, adapter,
+ polymorphic_from=None,
+ only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None):
- """Produce a mapper level row processor callable
+ """Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
- # called _instance(), resists being broken out, as
+ # called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
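# A minimal illustration of the closure pattern described above
# (hypothetical names, not ORM code); keeping the per-row work inline in
# one closure avoids paying an extra Python function call per row:
#
#     def make_processor(pk_cols):
#         def _instance(row):
#             return tuple(row[col] for col in pk_cols)   # work stays inline
#         return _instance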
identitykey = mapper._identity_key_from_state(refresh_state)
else:
identitykey = (
- identity_class,
+ identity_class,
tuple([row[column] for column in pk_cols])
)
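# when the query requested a version check, the identity-mapped state's
# version id is compared against the incoming row's; a mismatch is treated
# as stale data (see StaleDataError below)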
version_id_col is not None and \
context.version_check and \
mapper._get_state_attr_by_column(
- state,
- dict_,
+ state,
+ dict_,
mapper.version_id_col) != \
row[version_id_col]:
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
- "does not match database-loaded version id '%s'."
- % (state_str(state),
+ "does not match database-loaded version id '%s'."
+ % (state_str(state),
mapper._get_state_attr_by_column(
state, dict_,
mapper.version_id_col),
row[version_id_col]))
elif refresh_state:
# out of band refresh_state detected (i.e. it's not in the
- # session.identity_map) honor it anyway. this can happen
+# session.identity_map); honor it anyway. this can happen
# if a _get() occurs within save_obj(), such as
# when eager_defaults is True.
state = refresh_state
if create_instance:
for fn in create_instance:
- instance = fn(mapper, context,
+ instance = fn(mapper, context,
row, mapper.class_)
if instance is not EXT_CONTINUE:
manager = attributes.manager_of_class(
if populate_instance:
for fn in populate_instance:
- ret = fn(mapper, context, row, state,
- only_load_props=only_load_props,
+ ret = fn(mapper, context, row, state,
+ only_load_props=only_load_props,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
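# a populate_instance hook returning anything other than EXT_CONTINUE stops
# the chain and takes the place of the default population of state.dict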
if populate_instance:
for fn in populate_instance:
- ret = fn(mapper, context, row, state,
- only_load_props=attrs,
+ ret = fn(mapper, context, row, state,
+ only_load_props=attrs,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
if result is not None:
if append_result:
for fn in append_result:
- if fn(mapper, context, row, state,
+ if fn(mapper, context, row, state,
result, instancekey=identitykey,
isnew=isnew) is not EXT_CONTINUE:
break
def _populators(mapper, context, path, row, adapter,
new_populators, existing_populators, eager_populators):
- """Produce a collection of attribute level row processor
+ """Produce a collection of attribute level row processor
callables."""
delayed_populators = []
- pops = (new_populators, existing_populators, delayed_populators,
+ pops = (new_populators, existing_populators, delayed_populators,
eager_populators)
for prop in mapper._props.itervalues():
for i, pop in enumerate(prop.create_row_processor(
if sub_mapper is mapper:
return None
- # replace the tip of the path info with the subclass mapper
- # being used, that way accurate "load_path" info is available
+ # replace the tip of the path info with the subclass mapper
+# being used, so that accurate "load_path" info is available
# for options invoked during deferred loads, e.g.
# query(Person).options(defer(Engineer.machines, Machine.name)).
# for AliasedClass paths, disregard this step (new in 0.8).
return instance_processor(
sub_mapper,
- context,
- path.parent[sub_mapper]
- if not path.is_aliased_class
- else path,
+ context,
+ path.parent[sub_mapper]
+ if not path.is_aliased_class
+ else path,
adapter,
polymorphic_from=mapper)
return configure_subclass_mapper
from .. import util
from . import exc as orm_exc, attributes, util as orm_util
from .attributes import (
- PASSIVE_NO_RESULT,
+ PASSIVE_NO_RESULT,
SQL_OK, NEVER_SET, ATTR_WAS_SET, NO_VALUE,
PASSIVE_NO_INITIALIZE
)
-from . import mapperlib
-from . import session as sessionlib
+sessionlib = util.importlater("sqlalchemy.orm", "session")
+instrumentation = util.importlater("sqlalchemy.orm", "instrumentation")
+mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
class InstanceState(object):
d = {'instance': self.obj()}
d.update(
(k, self.__dict__[k]) for k in (
- 'committed_state', '_pending_mutations', 'modified', 'expired',
+ 'committed_state', '_pending_mutations', 'modified', 'expired',
'callables', 'key', 'parents', 'load_options',
'class_',
- ) if k in self.__dict__
+ ) if k in self.__dict__
)
if self.load_path:
d['load_path'] = self.load_path.serialize()
return d
def __setstate__(self, state):
- from sqlalchemy.orm import instrumentation
inst = state['instance']
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.__dict__.update([
(k, state[k]) for k in (
- 'key', 'load_options',
- ) if k in state
+ 'key', 'load_options',
+ ) if k in state
])
if 'load_path' in state:
self.load_path = orm_util.PathRegistry.\
deserialize(state['load_path'])
- # setup _sa_instance_state ahead of time so that
+ # setup _sa_instance_state ahead of time so that
# unpickle events can access the object normally.
# see [ticket:2362]
manager.setup_instance(inst, self)
manager.dispatch.unpickle(self, state)
def initialize(self, key):
- """Set this attribute to an empty value or collection,
+ """Set this attribute to an empty value or collection,
based on the AttributeImpl in use."""
self.manager.get_impl(key).initialize(self, self.dict)
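# e.g. a scalar attribute is seeded with None while a collection attribute
# gets an empty collection; which of the two happens is up to the
# AttributeImpl, as the docstring notes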
def reset(self, dict_, key):
- """Remove the given attribute and any
+ """Remove the given attribute and any
callables associated with it."""
dict_.pop(key, None)
self.manager.deferred_scalar_loader(self, toload)
- # if the loader failed, or this
+ # if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
@property
def expired_attributes(self):
"""Return the set of keys which are 'expired' to be loaded by
- the manager's deferred scalar loader, assuming no pending
+ the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
self.committed_state[attr.key] = previous
- # the "or not self.modified" is defensive at
+ # the "or not self.modified" is defensive at
# this point. The assertion below is expected
# to be True:
# assert self._strong_obj is None or self.modified
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
- "collected."
+ "collected."
% (
- self.manager[attr.key],
+ self.manager[attr.key],
orm_util.state_class_str(self)
))
self.modified = True
- the "modified" flag is set to False
- any "expired" markers/callables for attributes loaded are removed.
- Attributes marked as "expired" can potentially remain
+ Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
@property
def history(self):
- return self.state.get_history(self.key,
+ return self.state.get_history(self.key,
PASSIVE_NO_INITIALIZE)