if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
- self._passive_update_flag= attributes.PASSIVE_OFF
+ self._passive_update_flag = attributes.PASSIVE_OFF
self.key = prop.key
if not self.prop.synchronize_pairs:
def hasparent(self, state):
"""return True if the given object instance has a parent,
- according to the ``InstrumentedAttribute`` handled by this
+ according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
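For example (a sketch): given a one-to-many ``User.addresses``,
an ``Address`` state "has a parent" while some ``User`` still
refers to it through the relationship; delete-orphan detection
builds on this check.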
"""
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
- uow,
+ uow,
self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
- uow,
+ uow,
self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
- uow,
+ uow,
self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
- uow,
+ uow,
self.mapper.primary_base_mapper
)
- self.per_property_dependencies(uow,
- parent_saves,
- child_saves,
- parent_deletes,
- child_deletes,
- after_save,
+ self.per_property_dependencies(uow,
+ parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
before_delete
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
- These actions will operate on all relevant states
+ These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
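For example (a sketch): a self-referential relationship such as
``Node.children`` can make ``SaveUpdateAll(Node)`` depend on
itself in the aggregated graph; the flush then breaks the cycle
by emitting per-state ``SaveUpdateState`` / ``ProcessState``
actions instead.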
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
- uow,
+ uow,
self.parent.base_mapper)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(
- uow,
+ uow,
self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
continue
if isdelete:
- before_delete = unitofwork.ProcessState(uow,
+ before_delete = unitofwork.ProcessState(uow,
self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(
- uow,
- state,
+ uow,
+ state,
parent_base_mapper)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(
- uow,
- state,
+ uow,
+ state,
parent_base_mapper)
if child_in_cycles:
if deleted:
child_action = (
unitofwork.DeleteState(
- uow, child_state,
- child_base_mapper),
+ uow, child_state,
+ child_base_mapper),
True)
else:
child_action = (
unitofwork.SaveUpdateState(
- uow, child_state,
- child_base_mapper),
+ uow, child_state,
+ child_base_mapper),
False)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
- self.per_state_dependencies(uow, parent_saves,
- parent_deletes,
- child_action,
- after_save, before_delete,
+ self.per_state_dependencies(uow, parent_saves,
+ parent_deletes,
+ child_action,
+ after_save, before_delete,
isdelete, childisdelete)
passive = attributes.PASSIVE_OFF
for s in states:
- # TODO: add a high speed method
+ # TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(
- s,
- self.key,
+ s,
+ self.key,
passive)
if history and not history.empty():
return True
def _verify_canload(self, state):
if state is not None and \
- not self.mapper._canload(state,
+ not self.mapper._canload(state,
allow_subtypes=not self.enable_typechecks):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError('Attempting to flush an item of type '
return None
process_key = tuple(sorted(
- [self.key] +
+ [self.key] +
[p.key for p in self.prop._reverse_property]
))
return uow.memo(
- ('reverse_key', process_key),
+ ('reverse_key', process_key),
set
)
for x in related:
if x is not None:
uowcommit.issue_post_update(
- state,
+ state,
[r for l, r in self.prop.synchronize_pairs]
)
break
class OneToManyDP(DependencyProcessor):
- def per_property_dependencies(self, uow, parent_saves,
- child_saves,
- parent_deletes,
- child_deletes,
- after_save,
+ def per_property_dependencies(self, uow, parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
before_delete,
):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
- uow,
- self.mapper.primary_base_mapper,
+ uow,
+ self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
- uow,
- self.mapper.primary_base_mapper,
+ uow,
+ self.mapper.primary_base_mapper,
True)
uow.dependencies.update([
(before_delete, child_deletes),
])
- def per_state_dependencies(self, uow,
- save_parent,
- delete_parent,
- child_action,
- after_save, before_delete,
+ def per_state_dependencies(self, uow,
+ save_parent,
+ delete_parent,
+ child_action,
+ after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
- uow,
- self.mapper.primary_base_mapper,
+ uow,
+ self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
- uow,
- self.mapper.primary_base_mapper,
+ uow,
+ self.mapper.primary_base_mapper,
True)
# TODO: this whole block is not covered
else:
uow.dependencies.update([
(before_delete, child_pre_updates),
- (child_pre_updates, delete_parent),
+ (child_pre_updates, delete_parent),
])
elif not isdelete:
uow.dependencies.update([
])
def presort_deletes(self, uowcommit, states):
- # head object is being deleted, and we manage its list of
- # child objects the child objects have to have their
+ # head object is being deleted, and we manage its list of
+ # child objects; the child objects have to have their
# foreign key to the parent set to NULL
should_null_fks = not self.cascade.delete and \
not self.passive_deletes == 'all'
for state in states:
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if should_null_fks:
for child in history.unchanged:
if child is not None:
- uowcommit.register_object(child,
+ uowcommit.register_object(child,
operation="delete", prop=self.prop)
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
passive)
if history:
for child in history.added:
if child is not None:
- uowcommit.register_object(child, cancel_delete=True,
- operation="add",
+ uowcommit.register_object(child, cancel_delete=True,
+ operation="add",
prop=self.prop)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
- uowcommit.register_object(child, isdelete=False,
- operation='delete',
+ uowcommit.register_object(child, isdelete=False,
+ operation='delete',
prop=self.prop)
elif self.hasparent(child) is False:
- uowcommit.register_object(child, isdelete=True,
+ uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
- child,
- False,
+ child,
+ False,
self.passive_updates,
operation="pk change",
prop=self.prop)
def process_deletes(self, uowcommit, states):
- # head object is being deleted, and we manage its list of
- # child objects the child objects have to have their foreign
- # key to the parent set to NULL this phase can be called
+ # head object is being deleted, and we manage its list of
+ # child objects; the child objects have to have their foreign
+ # key to the parent set to NULL. This phase can be called
# safely for any cascade but is unnecessary if delete cascade
# is on.
for state in states:
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and \
self.hasparent(child) is False:
self._synchronize(
- state,
- child,
- None, True,
+ state,
+ child,
+ None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
difference(children_added):
if child is not None:
self._synchronize(
- state,
- child,
- None, True,
+ state,
+ child,
+ None, True,
uowcommit, False)
if self.post_update and child:
- self._post_update(child,
- uowcommit,
+ self._post_update(child,
+ uowcommit,
[state])
# technically, we can even remove each child from the
- # collection here too. but this would be a somewhat
- # inconsistent behavior since it wouldn't happen
+ # collection here too. But this would be a somewhat
+ # inconsistent behavior since it wouldn't happen
# if the old parent wasn't deleted but the child was moved.
def process_saves(self, uowcommit, states):
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
- self._synchronize(state, child, None,
+ self._synchronize(state, child, None,
False, uowcommit, False)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
for child in history.deleted:
if not self.cascade.delete_orphan and \
not self.hasparent(child):
- self._synchronize(state, child, None, True,
+ self._synchronize(state, child, None, True,
uowcommit, False)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
- self._synchronize(state, child, None,
+ self._synchronize(state, child, None,
False, uowcommit, True)
- def _synchronize(self, state, child,
+ def _synchronize(self, state, child,
associationrow, clearkeys, uowcommit,
pks_changed):
source = state
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
- sync.populate(source, self.parent, dest, self.mapper,
+ sync.populate(source, self.parent, dest, self.mapper,
self.prop.synchronize_pairs, uowcommit,
self.passive_updates and pks_changed)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
- uowcommit,
- state,
- self.parent,
+ uowcommit,
+ state,
+ self.parent,
self.prop.synchronize_pairs)
class ManyToOneDP(DependencyProcessor):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
- def per_property_dependencies(self, uow,
- parent_saves,
- child_saves,
- parent_deletes,
- child_deletes,
- after_save,
+ def per_property_dependencies(self, uow,
+ parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
before_delete):
if self.post_update:
parent_post_updates = unitofwork.IssuePostUpdate(
- uow,
- self.parent.primary_base_mapper,
+ uow,
+ self.parent.primary_base_mapper,
False)
parent_pre_updates = unitofwork.IssuePostUpdate(
- uow,
- self.parent.primary_base_mapper,
+ uow,
+ self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(parent_deletes, child_deletes)
])
- def per_state_dependencies(self, uow,
- save_parent,
- delete_parent,
- child_action,
- after_save, before_delete,
+ def per_state_dependencies(self, uow,
+ save_parent,
+ delete_parent,
+ child_action,
+ after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.IssuePostUpdate(
- uow,
- self.parent.primary_base_mapper,
+ uow,
+ self.parent.primary_base_mapper,
False)
if childisdelete:
uow.dependencies.update([
])
else:
parent_pre_updates = unitofwork.IssuePostUpdate(
- uow,
- self.parent.primary_base_mapper,
+ uow,
+ self.parent.primary_base_mapper,
True)
uow.dependencies.update([
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
if history:
if self.cascade.delete_orphan:
for child in todelete:
if child is None:
continue
- uowcommit.register_object(child, isdelete=True,
+ uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
- uowcommit.register_object(child, isdelete=True,
+ uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
not self.cascade.delete_orphan and \
not self.passive_deletes == 'all':
- # post_update means we have to update our
+ # post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
if history:
self._post_update(state, uowcommit, history.sum())
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
- state,
+ state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
- self._synchronize(state, child, None, False,
+ self._synchronize(state, child, None, False,
uowcommit, "add")
if self.post_update:
not uowcommit.session._contains_state(child):
util.warn(
"Object of type %s not in session, %s "
- "operation along '%s' won't proceed" %
+ "operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
- sync.populate(child, self.mapper, state,
- self.parent,
- self.prop.synchronize_pairs,
+ sync.populate(child, self.mapper, state,
+ self.parent,
+ self.prop.synchronize_pairs,
uowcommit,
- False)
+ False)
class DetectKeySwitch(DependencyProcessor):
- """For many-to-one relationships with no one-to-many backref,
+ """For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
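For example (an illustrative sketch; the mapped names are
placeholders)::

    mapper(Address, addresses, properties={
        # many-to-one with no backref; a User primary key change
        # can't be found through a backref collection, so this
        # processor scans session states referencing the old key
        'user': relationship(User)
    })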
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(
- uow,
+ uow,
self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
- ('pk_switchers', self),
+ ('pk_switchers', self),
lambda: (set(), set())
)
related is not None:
related_state = attributes.instance_state(dict_[self.key])
if related_state in switchers:
- uowcommit.register_object(state,
- False,
+ uowcommit.register_object(state,
+ False,
self.passive_updates)
sync.populate(
- related_state,
- self.mapper, state,
- self.parent, self.prop.synchronize_pairs,
+ related_state,
+ self.mapper, state,
+ self.parent, self.prop.synchronize_pairs,
uowcommit, self.passive_updates)
def _pks_changed(self, uowcommit, state):
- return bool(state.key) and sync.source_modified(uowcommit,
- state,
- self.mapper,
+ return bool(state.key) and sync.source_modified(uowcommit,
+ state,
+ self.mapper,
self.prop.synchronize_pairs)
class ManyToManyDP(DependencyProcessor):
- def per_property_dependencies(self, uow, parent_saves,
- child_saves,
- parent_deletes,
- child_deletes,
- after_save,
+ def per_property_dependencies(self, uow, parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
before_delete
):
(child_saves, after_save),
(after_save, child_deletes),
- # a rowswitch on the parent from deleted to saved
- # can make this one occur, as the "save" may remove
- # an element from the
+ # a rowswitch on the parent from deleted to saved
+ # can make this one occur, as the "save" may remove
+ # an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
(before_delete, child_saves),
])
- def per_state_dependencies(self, uow,
- save_parent,
- delete_parent,
- child_action,
- after_save, before_delete,
+ def per_state_dependencies(self, uow,
+ save_parent,
+ delete_parent,
+ child_action,
+ after_save, before_delete,
isdelete, childisdelete):
if not isdelete:
if childisdelete:
# TODO: no tests fail if this whole
# thing is removed !!!!
if not self.passive_deletes:
- # if no passive deletes, load history on
+ # if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
- # if no passive updates, load history on
+ # if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
attributes.PASSIVE_OFF)
if not self.cascade.delete_orphan:
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
- uowcommit.register_object(child, isdelete=True,
+ uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
- 'delete',
+ 'delete',
child):
uowcommit.register_object(
st_, isdelete=True)
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
- # this history should be cached already, as
+ # this history should be cached already, as
# we loaded it in preprocess_deletes
history = uowcommit.get_attribute_history(
- state,
- self.key,
+ state,
+ self.key,
self._passive_delete_flag)
if history:
for child in history.non_added():
if child is None or \
- (processed is not None and
+ (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(
- state,
- child,
- associationrow,
+ state,
+ child,
+ associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
if processed is not None:
processed.update(tmp)
- self._run_crud(uowcommit, secondary_insert,
+ self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def process_saves(self, uowcommit, states):
for state in states:
need_cascade_pks = not self.passive_updates and \
- self._pks_changed(uowcommit, state)
+ self._pks_changed(uowcommit, state)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
if history:
for child in history.added:
if child is None or \
- (processed is not None and
+ (processed is not None and
(state, child) in processed):
continue
associationrow = {}
- if not self._synchronize(state,
- child,
- associationrow,
+ if not self._synchronize(state,
+ child,
+ associationrow,
False, uowcommit, "add"):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if child is None or \
- (processed is not None and
+ (processed is not None and
(state, child) in processed):
continue
associationrow = {}
- if not self._synchronize(state,
- child,
- associationrow,
+ if not self._synchronize(state,
+ child,
+ associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
- tmp.update((c, state)
+ tmp.update((c, state)
for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
- sync.update(state,
- self.parent,
- associationrow,
- "old_",
+ sync.update(state,
+ self.parent,
+ associationrow,
+ "old_",
self.prop.synchronize_pairs)
- sync.update(child,
- self.mapper,
- associationrow,
- "old_",
+ sync.update(child,
+ self.mapper,
+ associationrow,
+ "old_",
self.prop.secondary_synchronize_pairs)
secondary_update.append(associationrow)
if processed is not None:
processed.update(tmp)
- self._run_crud(uowcommit, secondary_insert,
+ self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
- def _run_crud(self, uowcommit, secondary_insert,
+ def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
- c == sql.bindparam(c.key, type_=c.type)
- for c in self.secondary.c
+ c == sql.bindparam(c.key, type_=c.type)
+ for c in self.secondary.c
if c.key in associationrow
]))
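# each association row is matched on every secondary-table column
# present in the row; a short rowcount below means another
# transaction already removed some of these rows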
result = connection.execute(statement, secondary_delete)
result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete %d row(s); "
- "Only %d were matched." %
+ "Only %d were matched." %
(self.secondary.description, len(secondary_delete),
result.rowcount)
)
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(sql.and_(*[
- c == sql.bindparam("old_" + c.key, type_=c.type)
- for c in self.secondary.c
+ c == sql.bindparam("old_" + c.key, type_=c.type)
+ for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_update)
result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update %d row(s); "
- "Only %d were matched." %
+ "Only %d were matched." %
(self.secondary.description, len(secondary_update),
result.rowcount)
)
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
- def _synchronize(self, state, child, associationrow,
+ def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation):
if associationrow is None:
return
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
- "operation along '%s' won't proceed" %
+ "operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return False
self._verify_canload(child)
- sync.populate_dict(state, self.parent, associationrow,
+ sync.populate_dict(state, self.parent, associationrow,
self.prop.synchronize_pairs)
sync.populate_dict(child, self.mapper, associationrow,
self.prop.secondary_synchronize_pairs)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
- uowcommit,
- state,
- self.parent,
+ uowcommit,
+ state,
+ self.parent,
self.prop.synchronize_pairs)
_direction_to_processor = {
attribute, as well as that attribute as it appears on individual
instances of the class, including attribute instrumentation,
attribute access, loading behavior, and dependency calculations.
-
+
The most common occurrences of :class:`.MapperProperty` are the
- mapped :class:`.Column`, which is represented in a mapping as
+ mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of :class:`.RelationshipProperty`.
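For example (an illustrative sketch; the names are placeholders)::

    mapper(User, users, properties={
        'name': users.c.name,                # -> ColumnProperty
        'addresses': relationship(Address),  # -> RelationshipProperty
    })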
-
+
"""
cascade = ()
pass
- def create_row_processor(self, context, path,
+ def create_row_processor(self, context, path,
mapper, row, adapter):
"""Return a 3-tuple consisting of three row processing functions.
"""
pass
- def per_property_preprocessors(self, uow):
- pass
def is_primary(self):
"""Return True if this ``MapperProperty``'s mapper is the
"""Return true if this collection contains any member that meets the
given criterion.
- The usual implementation of ``any()`` is
+ The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
- :param criterion: an optional ClauseElement formulated against the
+ :param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
- :param \**kwargs: key/value pairs corresponding to member class attribute
+ :param \**kwargs: key/value pairs corresponding to member class attribute
names which will be compared via equality to the corresponding
values.
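For example (illustrative; ``MyClass`` and ``Related`` are
placeholder mapped classes)::

    session.query(MyClass).filter(
        MyClass.related.any(Related.x == 2)
    )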
"""Return true if this element references a member which meets the
given criterion.
- The usual implementation of ``has()`` is
+ The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
- :param criterion: an optional ClauseElement formulated against the
+ :param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
- :param \**kwargs: key/value pairs corresponding to member class attribute
+ :param \**kwargs: key/value pairs corresponding to member class attribute
names which will be compared via equality to the corresponding
values.
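For example (illustrative; ``MyClass`` and ``Related`` are
placeholder mapped classes)::

    session.query(MyClass).filter(
        MyClass.related.has(Related.x == 2)
    )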
def setup(self, context, entity, path, adapter, **kwargs):
self._get_context_strategy(context, path).\
- setup_query(context, entity, path,
+ setup_query(context, entity, path,
adapter, **kwargs)
def create_row_processor(self, context, path, mapper, row, adapter):
return self._get_context_strategy(context, path).\
- create_row_processor(context, path,
+ create_row_processor(context, path,
mapper, row, adapter)
def do_init(self):
"""Describe a modification to a Query."""
propagate_to_loaders = False
- """if True, indicate this option should be carried along
+ """if True, indicate this option should be carried along
Query object generated by scalar or object lazy loaders.
"""
else:
raise sa_exc.ArgumentError(
"Can't find property '%s' on any entity "
- "specified in this Query. Note the full path "
- "from root (%s) to target entity must be specified."
- % (token, ",".join(str(x) for
+ "specified in this Query. Note the full path "
+ "from root (%s) to target entity must be specified."
+ % (token, ",".join(str(x) for
x in query._mapper_entities))
)
else:
def _process_paths(self, query, raiseerr):
"""reconcile the 'key' for this PropertyOption with
the current path and entities of the query.
-
+
Return a list of affected paths.
-
+
"""
path = orm_util.PathRegistry.root
entity = None
paths = []
no_result = []
- # _current_path implies we're in a
+ # _current_path implies we're in a
# secondary load with an existing path
current_path = list(query._current_path.path)
if not entity:
entity = self._find_entity_basestring(
- query,
- token,
+ query,
+ token,
raiseerr)
if entity is None:
return no_result
if not entity:
entity = self._find_entity_prop_comparator(
query,
- prop.key,
- token.parententity,
+ prop.key,
+ token.parententity,
raiseerr)
if not entity:
return no_result
path_element = mapper = ext_info.mapper
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
- ext_info.mapper.base_mapper,
+ ext_info.mapper.base_mapper,
ext_info.mapper, aliased=True)
ext_info = orm_util._extended_entity_info(ac)
path.set(query, "path_with_polymorphic", ext_info)
)
if current_path:
- # ran out of tokens before
+ # ran out of tokens before
# current_path was exhausted.
assert not tokens
return no_result
def setup_query(self, context, entity, path, adapter, **kwargs):
pass
- def create_row_processor(self, context, path, mapper,
+ def create_row_processor(self, context, path, mapper,
row, adapter):
"""Return row processing functions which fulfill the contract
specified by MapperProperty.create_row_processor.
from .. import sql, util, log, exc as sa_exc, event, schema
from ..sql import expression, visitors, operators, util as sql_util
from . import instrumentation, attributes, \
- exc as orm_exc, unitofwork, events, loading
+ exc as orm_exc, events, loading
from .interfaces import MapperProperty
from .util import _INSTRUMENTOR, _class_to_mapper, \
self.always_refresh = always_refresh
self.version_id_col = version_id_col
self.version_id_generator = version_id_generator or \
- (lambda x:(x or 0) + 1)
+ (lambda x: (x or 0) + 1)
self.concrete = concrete
self.single = False
self.inherits = inherits
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
- self.polymorphic_on = expression._clause_element_as_expr(polymorphic_on)
+ self.polymorphic_on = expression._clause_element_as_expr(
+ polymorphic_on)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
ret[t] = table_to_mapper[t]
return ret
- def _per_mapper_flush_actions(self, uow):
- saves = unitofwork.SaveUpdateAll(uow, self.base_mapper)
- deletes = unitofwork.DeleteAll(uow, self.base_mapper)
- uow.dependencies.add((saves, deletes))
-
- for dep in self._dependency_processors:
- dep.per_property_preprocessors(uow)
-
- for prop in self._props.values():
- prop.per_property_preprocessors(uow)
-
- def _per_state_flush_actions(self, uow, states, isdelete):
-
- base_mapper = self.base_mapper
- save_all = unitofwork.SaveUpdateAll(uow, base_mapper)
- delete_all = unitofwork.DeleteAll(uow, base_mapper)
- for state in states:
- # keep saves before deletes -
- # this ensures 'row switch' operations work
- if isdelete:
- action = unitofwork.DeleteState(uow, state, base_mapper)
- uow.dependencies.add((save_all, action))
- else:
- action = unitofwork.SaveUpdateState(uow, state, base_mapper)
- uow.dependencies.add((action, delete_all))
-
- yield action
-
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
from .. import sql, util, log, exc as sa_exc
from ..sql import operators, expression
from . import (
- attributes, dependency, mapper,
- strategies, configure_mappers, relationships
- )
-from .util import (
- CascadeOptions, \
- _orm_annotate, _orm_deannotate, _orm_full_deannotate,
- _entity_info
+ attributes, mapper,
+ strategies, configure_mappers, relationships,
+ dependency
)
+from .util import CascadeOptions, \
+ _orm_annotate, _orm_deannotate, _orm_full_deannotate, _entity_info
+
+from .interfaces import MANYTOMANY, MANYTOONE, ONETOMANY,\
+ PropComparator, StrategizedProperty
-from .interfaces import (
- MANYTOMANY, MANYTOONE, MapperProperty, ONETOMANY,
- PropComparator, StrategizedProperty
- )
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
NoneType = type(None)
from descriptor_props import CompositeProperty, SynonymProperty, \
- ComparableProperty,ConcreteInheritedProperty
+ ComparableProperty, ConcreteInheritedProperty
-__all__ = ('ColumnProperty', 'CompositeProperty', 'SynonymProperty',
- 'ComparableProperty', 'RelationshipProperty', 'RelationProperty')
+__all__ = ['ColumnProperty', 'CompositeProperty', 'SynonymProperty',
+ 'ComparableProperty', 'RelationshipProperty', 'RelationProperty']
class ColumnProperty(StrategizedProperty):
"""
self._orig_columns = [expression._labeled(c) for c in columns]
- self.columns = [expression._labeled(_orm_full_deannotate(c))
+ self.columns = [expression._labeled(_orm_full_deannotate(c))
for c in columns]
self.group = kwargs.pop('group', None)
self.deferred = kwargs.pop('deferred', False)
if kwargs:
raise TypeError(
"%s received unexpected keyword argument(s): %s" % (
- self.__class__.__name__,
+ self.__class__.__name__,
', '.join(sorted(kwargs.keys()))))
util.set_creation_order(self)
return
attributes.register_descriptor(
- mapper.class_,
- self.key,
- comparator=self.comparator_factory(self, mapper),
+ mapper.class_,
+ self.key,
+ comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc
)
def copy(self):
return ColumnProperty(
- deferred=self.deferred,
- group=self.group,
+ deferred=self.deferred,
+ group=self.group,
active_history=self.active_history,
*self.columns)
- def _getcommitted(self, state, dict_, column,
+ def _getcommitted(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
return state.get_impl(self.key).\
get_committed_value(state, dict_, passive=passive)
- def merge(self, session, source_state, source_dict, dest_state,
+ def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
if not self.instrument:
return
def __init__(self, argument,
secondary=None, primaryjoin=None,
- secondaryjoin=None,
+ secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
- strategy_class=None, _local_remote_pairs=None,
+ strategy_class=None, _local_remote_pairs=None,
query_class=None):
self.uselist = uselist
self.cascade = CascadeOptions("save-update, merge")
if self.passive_deletes == 'all' and \
- ("delete" in self.cascade or
+ ("delete" in self.cascade or
"delete-orphan" in self.cascade):
raise sa_exc.ArgumentError(
"Can't set passive_deletes='all' in conjunction "
def instrument_class(self, mapper):
attributes.register_descriptor(
- mapper.class_,
- self.key,
- comparator=self.comparator_factory(self, mapper),
+ mapper.class_,
+ self.key,
+ comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
"""
return RelationshipProperty.Comparator(
- self.property,
- self.mapper,
+ self.property,
+ self.mapper,
cls, adapter=self.adapter)
def in_(self, other):
- """Produce an IN clause - this is not implemented
+ """Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
mytable.related_id == <some id>
- Where ``<some id>`` is the primary key of the given
+ Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
- * Compared to a scalar one-to-many, will produce a
+ * Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
- the given target.
+ the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
- # only have the "joined left side" of what we
+ # only have the "joined left side" of what we
# return be subject to Query adaption. The right
- # side of it is used for an exists() subquery and
+ # side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
Will produce a query like::
SELECT * FROM my_table WHERE
- EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
+ EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
- """Return a simple expression that tests a collection for
+ """Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
- When used in a simple one-to-many context, an
+ When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
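will render (in the typical one-to-many case, as a sketch)
against the parent's primary key::

    mytable.id == <some id>

where ``<some id>`` is the value of the foreign key attribute on
``other`` which refers to the primary key of its parent object.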
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
- criterion = sql.and_(*[x==y for (x, y) in
+ criterion = sql.and_(*[x == y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.\
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
- * Compared to a scalar one-to-many, will produce a
+ * Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
- the given target.
+ the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
configure_mappers()
return self.prop
- def compare(self, op, value,
- value_is_parent=False,
+ def compare(self, op, value,
+ value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
- return self._optimized_compare(None,
+ return self._optimized_compare(None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
- return self._optimized_compare(value,
+ return self._optimized_compare(value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
- def _optimized_compare(self, value, value_is_parent=False,
- adapt_source=None,
+ def _optimized_compare(self, value, value_is_parent=False,
+ adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
- def merge(self,
+ def merge(self,
session,
source_state,
source_dict,
dest_state,
- dest_dict,
+ dest_dict,
load, _recursive):
if load:
dest_state.get_impl(self.key).set(dest_state,
dest_dict, obj, None)
- def _value_as_iterable(self, state, dict_, key,
+ def _value_as_iterable(self, state, dict_, key,
passive=attributes.PASSIVE_OFF):
"""Return a list of tuples (state, obj) for the given
key.
return []
elif hasattr(impl, 'get_collection'):
return [
- (attributes.instance_state(o), o) for o in
+ (attributes.instance_state(o), o) for o in
impl.get_collection(state, dict_, x, passive=passive)
]
else:
raise AssertionError("Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'" % (
- self.key,
- self.parent.class_,
+ self.key,
+ self.parent.class_,
c.__class__
))
@util.memoized_property
def mapper(self):
- """Return the targeted :class:`.Mapper` for this
+ """Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
- """Return the selectable linked to this
- :class:`.RelationshipProperty` object's target
+ """Return the selectable linked to this
+ :class:`.RelationshipProperty` object's target
:class:`.Mapper`."""
return self.target
super(RelationshipProperty, self).do_init()
def _process_dependent_arguments(self):
- """Convert incoming configuration arguments to their
+ """Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
- expression._only_column_elements(x, "order_by")
+ expression._only_column_elements(x, "order_by")
for x in
util.to_list(self.order_by)]
self._user_defined_foreign_keys = \
util.column_set(
- expression._only_column_elements(x, "foreign_keys")
+ expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(
self._user_defined_foreign_keys
))
self.remote_side = \
util.column_set(
- expression._only_column_elements(x, "remote_side")
+ expression._only_column_elements(x, "remote_side")
for x in
util.to_column_set(self.remote_side))
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
- """Test that this relationship is legal, warn about
+ """Test that this relationship is legal, warn about
inheritance conflicts."""
if not self.is_primary() \
% self)
def _columns_are_mapped(self, *cols):
- """Return True if all columns in the given collection are
+ """Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.RelationshipProperty`.
"""
return True
def _generate_backref(self):
- """Interpret the 'backref' instruction to create a
+ """Interpret the 'backref' instruction to create a
:func:`.relationship` complementary to this one."""
if not self.is_primary():
pj = kwargs.pop('primaryjoin', self._join_condition.secondaryjoin)
sj = kwargs.pop('secondaryjoin', self._join_condition.primaryjoin)
else:
- pj = kwargs.pop('primaryjoin',
+ pj = kwargs.pop('primaryjoin',
self._join_condition.primaryjoin_reverse_remote)
sj = kwargs.pop('secondaryjoin', None)
if sj:
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
- def per_property_preprocessors(self, uow):
- if not self.viewonly and self._dependency_processor:
- self._dependency_processor.per_property_preprocessors(uow)
-
- def _create_joins(self, source_polymorphic=False,
- source_selectable=None, dest_polymorphic=False,
+ def _create_joins(self, source_polymorphic=False,
+ source_selectable=None, dest_polymorphic=False,
dest_selectable=None, of_type=None):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
from .. import util, event
from ..util import topological
-from . import attributes, interfaces, persistence, util as orm_util
-session = util.importlater("sqlalchemy.orm", "session")
+from . import attributes, persistence, util as orm_util
+
+sessionlib = util.importlater("sqlalchemy.orm", "session")
def track_cascade_events(descriptor, prop):
"""Establish event listeners on object attributes which handle
key = prop.key
def append(state, item, initiator):
- # process "save_update" cascade rules for when
+ # process "save_update" cascade rules for when
# an instance is appended to the list of another instance
- sess = session._state_session(state)
+ sess = sessionlib._state_session(state)
if sess:
prop = state.manager.mapper._props[key]
item_state = attributes.instance_state(item)
return item
def remove(state, item, initiator):
- sess = session._state_session(state)
+ sess = sessionlib._state_session(state)
if sess:
prop = state.manager.mapper._props[key]
# expunge pending orphans
sess.expunge(item)
def set_(state, newvalue, oldvalue, initiator):
- # process "save_update" cascade rules for when an instance
+ # process "save_update" cascade rules for when an instance
# is attached to another instance
if oldvalue is newvalue:
return newvalue
- sess = session._state_session(state)
+ sess = sessionlib._state_session(state)
if sess:
prop = state.manager.mapper._props[key]
if newvalue is not None:
def __init__(self, session):
self.session = session
- # dictionary used by external actors to
+ # dictionary used by external actors to
# store arbitrary state information.
self.attributes = {}
- # dictionary of mappers to sets of
- # DependencyProcessors, which are also
+ # dictionary of mappers to sets of
+ # DependencyProcessors, which are also
# set to be part of the sorted flush actions,
# which have that mapper as a parent.
self.deps = util.defaultdict(set)
# and determine if a flush action is needed
self.presort_actions = {}
- # dictionary of PostSortRec objects, each
+ # dictionary of PostSortRec objects, each
# one issues work during the flush within
# a certain ordering.
self.postsort_actions = {}
# tracks InstanceStates which will be receiving
# a "post update" call. Keys are mappers,
- # values are a set of states and a set of the
+ # values are a set of states and a set of the
# columns which should be included in the update.
self.post_update_states = util.defaultdict(lambda: (set(), set()))
return bool(self.states)
def is_deleted(self, state):
- """return true if the given state is marked as deleted
+ """return true if the given state is marked as deleted
within this uowtransaction."""
return state in self.states and self.states[state][0]
self.states[state] = (isdelete, True)
- def get_attribute_history(self, state, key,
+ def get_attribute_history(self, state, key,
passive=attributes.PASSIVE_NO_INITIALIZE):
- """facade to attributes.get_state_history(), including caching of results."""
+ """facade to attributes.get_state_history(), including
+ caching of results."""
hashkey = ("history", state, key)
if hashkey in self.attributes:
history, state_history, cached_passive = self.attributes[hashkey]
- # if the cached lookup was "passive" and now
+ # if the cached lookup was "passive" and now
# we want non-passive, do a non-passive lookup and re-cache
if not cached_passive & attributes.SQL_OK \
and passive & attributes.SQL_OK:
impl = state.manager[key].impl
- history = impl.get_history(state, state.dict,
+ history = impl.get_history(state, state.dict,
attributes.PASSIVE_OFF)
if history and impl.uses_objects:
state_history = history.as_state()
if key not in self.presort_actions:
self.presort_actions[key] = Preprocess(processor, fromparent)
- def register_object(self, state, isdelete=False,
+ def register_object(self, state, isdelete=False,
listonly=False, cancel_delete=False,
operation=None, prop=None):
if not self.session._contains_state(state):
if not state.deleted and operation is not None:
util.warn("Object of type %s not in session, %s operation "
- "along '%s' will not proceed" %
+ "along '%s' will not proceed" %
(orm_util.state_class_str(state), operation, prop))
return False
mapper = state.manager.mapper
if mapper not in self.mappers:
- mapper._per_mapper_flush_actions(self)
+ self._per_mapper_flush_actions(mapper)
self.mappers[mapper].add(state)
self.states[state] = (isdelete, listonly)
states.add(state)
cols.update(post_update_cols)
+ def _per_mapper_flush_actions(self, mapper):
+ saves = SaveUpdateAll(self, mapper.base_mapper)
+ deletes = DeleteAll(self, mapper.base_mapper)
+ self.dependencies.add((saves, deletes))
+
+ for dep in mapper._dependency_processors:
+ dep.per_property_preprocessors(self)
+
+ for prop in mapper.relationships:
+ if prop.viewonly:
+ continue
+ dep = prop._dependency_processor
+ dep.per_property_preprocessors(self)
+
@util.memoized_property
def _mapper_for_dep(self):
- """return a dynamic mapping of (Mapper, DependencyProcessor) to
- True or False, indicating if the DependencyProcessor operates
+ """return a dynamic mapping of (Mapper, DependencyProcessor) to
+ True or False, indicating if the DependencyProcessor operates
on objects of that Mapper.
The result is stored in the dictionary persistently once
"""
return util.PopulateDict(
- lambda tup:tup[0]._props.get(tup[1].key) is tup[1].prop
+ lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop
)
def filter_states_for_dep(self, dep, states):
- """Filter the given list of InstanceStates to those relevant to the
+ """Filter the given list of InstanceStates to those relevant to the
given DependencyProcessor.
"""
# see if the graph of mapper dependencies has cycles.
self.cycles = cycles = topological.find_cycles(
- self.dependencies,
+ self.dependencies,
self.postsort_actions.values())
if cycles:
# execute
if self.cycles:
for set_ in topological.sort_as_subsets(
- self.dependencies,
+ self.dependencies,
postsort_actions):
while set_:
n = set_.pop()
n.execute_aggregate(self, set_)
else:
for rec in topological.sort(
- self.dependencies,
+ self.dependencies,
postsort_actions):
rec.execute(self)
def finalize_flush_changes(self):
- """mark processed objects as clean / deleted after a successful flush().
+ """mark processed objects as clean / deleted after a successful
+ flush().
this method is called within the flush() method after the
execute() method has succeeded and the transaction has been committed.
def _mappers(self, uow):
if self.fromparent:
return iter(
- m for m in self.dependency_processor.parent.self_and_descendants
+ m for m in
+ self.dependency_processor.parent.self_and_descendants
if uow._mapper_for_dep[(m, self.dependency_processor)]
)
else:
self.dependency_processor = dependency_processor
self.delete = delete
self.fromparent = fromparent
- uow.deps[dependency_processor.parent.base_mapper].add(dependency_processor)
+ uow.deps[dependency_processor.parent.base_mapper].\
+ add(dependency_processor)
def execute(self, uow):
states = self._elements(uow)
assert mapper is mapper.base_mapper
def execute(self, uow):
- persistence.save_obj(self.mapper,
+ persistence.save_obj(self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, False, False),
uow
)
+
def per_state_flush_actions(self, uow):
- states = list(uow.states_for_mapper_hierarchy(self.mapper, False, False))
- for rec in self.mapper._per_state_flush_actions(
- uow,
- states,
- False):
- yield rec
+ states = list(uow.states_for_mapper_hierarchy(
+ self.mapper, False, False))
+ base_mapper = self.mapper.base_mapper
+ delete_all = DeleteAll(uow, base_mapper)
+ for state in states:
+ # keep saves before deletes -
+ # this ensures 'row switch' operations work
+ action = SaveUpdateState(uow, state, base_mapper)
+ uow.dependencies.add((action, delete_all))
+ yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
)
def per_state_flush_actions(self, uow):
- states = list(uow.states_for_mapper_hierarchy(self.mapper, True, False))
- for rec in self.mapper._per_state_flush_actions(
- uow,
- states,
- True):
- yield rec
+ states = list(uow.states_for_mapper_hierarchy(
+ self.mapper, True, False))
+ base_mapper = self.mapper.base_mapper
+ save_all = SaveUpdateAll(uow, base_mapper)
+ for state in states:
+ # keep saves before deletes -
+ # this ensures 'row switch' operations work
+ action = DeleteState(uow, state, base_mapper)
+ uow.dependencies.add((save_all, action))
+ yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
cls_ = self.__class__
dependency_processor = self.dependency_processor
delete = self.delete
- our_recs = [r for r in recs
- if r.__class__ is cls_ and
+ our_recs = [r for r in recs
+ if r.__class__ is cls_ and
r.dependency_processor is dependency_processor and
r.delete is delete]
recs.difference_update(our_recs)
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
- our_recs = [r for r in recs
- if r.__class__ is cls_ and
+ our_recs = [r for r in recs
+ if r.__class__ is cls_ and
r.mapper is mapper]
recs.difference_update(our_recs)
persistence.save_obj(mapper,
- [self.state] +
- [r.state for r in our_recs],
+ [self.state] +
+ [r.state for r in our_recs],
uow)
def __repr__(self):
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
- our_recs = [r for r in recs
- if r.__class__ is cls_ and
+ our_recs = [r for r in recs
+ if r.__class__ is cls_ and
r.mapper is mapper]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
persistence.delete_obj(mapper,
- [s for s in states if uow.states[s][0]],
+ [s for s in states if uow.states[s][0]],
uow)
def __repr__(self):
def test_profile_1_create_tables(self):
self.test_baseline_1_create_tables()
- @profiling.function_call_count(5786, {'2.7+cextension':5683,
+ @profiling.function_call_count(5786, {'2.7+cextension':5683,
'2.6+cextension':5992})
def test_profile_1a_populate(self):
self.test_baseline_1a_populate()
- @profiling.function_call_count(413, {'3.2':398})
+ @profiling.function_call_count(388, {'3.2':378})
def test_profile_2_insert(self):
self.test_baseline_2_insert()
addresses = Table('addresses', metadata,
Column('email', String(50), primary_key=True),
- Column('username', String(50),
+ Column('username', String(50),
ForeignKey('users.username', **fk_args)),
test_needs_fk=True)
items = Table('items', metadata,
Column('itemname', String(50), primary_key=True),
- Column('description', String(100)),
+ Column('description', String(100)),
test_needs_fk=True)
users_to_items = Table('users_to_items', metadata,
- Column('username', String(50),
+ Column('username', String(50),
ForeignKey('users.username', **fk_args),
primary_key=True),
- Column('itemname', String(50),
+ Column('itemname', String(50),
ForeignKey('items.itemname', **fk_args),
primary_key=True),
test_needs_fk=True)
def go():
sess.flush()
if not passive_updates:
- # test passive_updates=False;
+ # test passive_updates=False;
# load addresses, update user, update 2 addresses
- self.assert_sql_count(testing.db, go, 4)
+ self.assert_sql_count(testing.db, go, 4)
else:
# test passive_updates=True; update user
- self.assert_sql_count(testing.db, go, 1)
+ self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
assert User(username='jack', addresses=[
- Address(username='jack'),
+ Address(username='jack'),
Address(username='jack')]) == \
sess.query(User).get('jack')
u1 = sess.query(User).get('fred')
eq_(User(username='fred', fullname='jack'), u1)
-
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_manytoone_passive(self):
def test_manytoone_nonpassive(self):
self._test_manytoone(False)
+ def test_manytoone_nonpassive_cold_mapping(self):
+ """test that the mapper-level m2o dependency processor
+ is set up even if the opposite side relationship
+ hasn't yet been part of a flush.
+
+ """
+ users, Address, addresses, User = (self.tables.users,
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User)
+
+ with testing.db.begin() as conn:
+ conn.execute(users.insert(),
+ username='jack', fullname='jack'
+ )
+ conn.execute(addresses.insert(),
+ email='jack1', username='jack'
+ )
+ conn.execute(addresses.insert(),
+ email='jack2', username='jack'
+ )
+
+ mapper(User, users)
+ mapper(Address, addresses, properties={
+ 'user': relationship(User,
+ passive_updates=False)
+ })
+
+ sess = create_session()
+ u1 = sess.query(User).first()
+ a1, a2 = sess.query(Address).all()
+ u1.username = 'ed'
+
+ def go():
+ sess.flush()
+ self.assert_sql_count(testing.db, go, 3)
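+ # 3 statements expected (a rough sketch): the UPDATE against
+ # users for the primary key change, plus UPDATEs against the
+ # dependent addresses rows, since passive_updates=False has the
+ # ORM issue the cascading updates itself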
+
def _test_manytoone(self, passive_updates):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
mapper(User, users)
mapper(Address, addresses, properties={
- 'user':relationship(User, passive_updates=passive_updates)
+ 'user': relationship(User, passive_updates=passive_updates)
})
sess = create_session()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
+
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_onetoone_passive(self):
def test_manytomany_passive(self):
self._test_manytomany(True)
- # mysqldb executemany() of the association table fails to
+ # mysqldb executemany() of the association table fails to
# report the correct row count
- @testing.fails_if(lambda: testing.against('mysql')
+ @testing.fails_if(lambda: testing.against('mysql')
and not testing.against('+zxjdbc'))
def test_manytomany_nonpassive(self):
self._test_manytomany(False)
session.add(a_editable)
session.commit()
- # do the switch in both directions -
+ # do the switch in both directions -
# one or the other should raise the error
# based on platform dictionary ordering
a_published.status = ARCHIVED
class SelfReferentialTest(fixtures.MappedTest):
- # mssql, mysql don't allow
+ # mssql, mysql don't allow
# ON UPDATE on self-referential keys
- __unsupported_on__ = ('mssql','mysql')
+ __unsupported_on__ = ('mssql', 'mysql')
@classmethod
def define_tables(cls, metadata):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
- 'parentnode':relationship(Node,
- remote_side=nodes.c.name,
+ 'parentnode': relationship(Node,
+ remote_side=nodes.c.name,
passive_updates=passive)
}
)
u1.username = 'ed'
sess.flush()
assert u1.addresses[0].username == 'ed'
- eq_(sa.select([addresses.c.username]).execute().fetchall(),
+ eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('ed',), ('ed',)])
sess.expunge_all()
def go():
sess.flush()
if not passive_updates:
- # test passive_updates=False; load addresses,
+ # test passive_updates=False; load addresses,
# update user, update 2 addresses
- self.assert_sql_count(testing.db, go, 4)
+ self.assert_sql_count(testing.db, go, 4)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
- assert User(username='jack',
+ assert User(username='jack',
addresses=[Address(username='jack'),
Address(username='jack')]) == \
sess.query(User).get(u1.id)
a1 = sess.query(Address).get(a1.id)
eq_(a1.username, None)
- eq_(sa.select([addresses.c.username]).execute().fetchall(),
+ eq_(sa.select([addresses.c.username]).execute().fetchall(),
[(None,), (None,)])
u1 = sess.query(User).get(u1.id)
test_needs_fk=True)
Table('addresses', metadata,
- Column('username', String(50),
+ Column('username', String(50),
ForeignKey('users.username', **fk_args),
primary_key=True
),
def _test_o2m_change(self, passive_updates):
"""Change the PK of a related entity to another.
- "on update cascade" is not involved here, so the mapper has
+ "on update cascade" is not involved here, so the mapper has
to do the UPDATE itself.
"""
def _test_onetomany(self, passive_updates):
"""Change the PK of a related entity via foreign key cascade.
- For databases that require "on update cascade", the mapper
+ For databases that require "on update cascade", the mapper
has to identify the row by the new value, not the old, when
it does the update.
sess.flush()
eq_(a1.username, 'ed')
eq_(a2.username, 'ed')
- eq_(sa.select([addresses.c.username]).execute().fetchall(),
+ eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('ed',), ('ed',)])
u1.username = 'jack'
"""Test cascades of pk->pk/fk on joined table inh."""
# mssql doesn't allow ON UPDATE on self-referential keys
- __unsupported_on__ = ('mssql',)
+ __unsupported_on__ = ('mssql',)
__requires__ = 'skip_mysql_on_windows',
Column('name', String(50), ForeignKey('person.name', **fk_args),
primary_key=True),
Column('primary_language', String(50)),
- Column('boss_name', String(50),
+ Column('boss_name', String(50),
ForeignKey('manager.name', **fk_args)),
test_needs_fk=True
)
Table('manager', metadata,
- Column('name', String(50),
+ Column('name', String(50),
ForeignKey('person.name', **fk_args),
primary_key=True),
Column('paperwork', String(50)),
self.classes.Engineer,
self.tables.engineer)
- mapper(Person, person, polymorphic_on=person.c.type,
+ mapper(Person, person, polymorphic_on=person.c.type,
polymorphic_identity='person',
passive_updates=passive_updates)
mapper(Engineer, engineer, inherits=Person,
polymorphic_identity='engineer', properties={
- 'boss':relationship(Manager,
+ 'boss': relationship(Manager,
primaryjoin=manager.c.name==engineer.c.boss_name,
passive_updates=passive_updates
)
self.classes.Engineer,
self.tables.engineer)
- mapper(Person, person, polymorphic_on=person.c.type,
+ mapper(Person, person, polymorphic_on=person.c.type,
polymorphic_identity='person',
passive_updates=passive_updates)
mapper(Engineer, engineer, inherits=Person,
polymorphic_identity='engineer', properties={
- 'boss':relationship(Manager,
+ 'boss': relationship(Manager,
primaryjoin=manager.c.name==engineer.c.boss_name,
passive_updates=passive_updates
)