hex identifier string. [ticket:1555]
- The visit_pool() method of Dialect is removed, and replaced with
on_connect(). This method returns a callable which receives
the raw DBAPI connection after each one is created. The callable
is assembled into a first_connect/connect pool listener by the
connection strategy if non-None. Provides a simpler interface
joined-table inheritance subclasses, using explicit join
criteria (i.e. not on a relation).
- @orm.attributes.on_reconstitute and
  MapperExtension.on_reconstitute have been renamed to
@orm.reconstructor and MapperExtension.reconstruct_instance
- Fixed @reconstructor hook for subclasses which inherit from a
The name of an event and the argument signature of a corresponding listener function are derived from
a class-bound specification method, which is attached to a marker class described in the documentation.
-For example, the documentation for :meth:`.PoolEvents.on_connect` indicates that the event name is ``"on_connect"``
+For example, the documentation for :meth:`.PoolEvents.connect` indicates that the event name is ``"connect"``
and that a user-defined listener function should receive two positional arguments::
from sqlalchemy.event import listen
def my_on_connect(dbapi_con, connection_record):
print "New DBAPI connection:", dbapi_con
- listen(Pool, 'on_connect', my_on_connect)
+ listen(Pool, 'connect', my_on_connect)
Targets
-------
The :func:`.listen` function is very flexible regarding targets. It generally accepts classes, instances of those
classes, and related classes or objects from which the appropriate target can be derived. For example,
-the above mentioned ``"on_connect"`` event accepts :class:`.Engine` classes and objects as well as :class:`.Pool`
+the above mentioned ``"connect"`` event accepts :class:`.Engine` classes and objects as well as :class:`.Pool`
classes and objects::
from sqlalchemy.event import listen
from sqlalchemy.pool import Pool, QueuePool
from sqlalchemy.engine import Engine
from sqlalchemy import create_engine
import psycopg2

def connect():
    return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test')

my_pool = QueuePool(connect)
my_engine = create_engine('postgresql://ed@localhost/test')
# associate listener with all instances of Pool
- listen(Pool, 'on_connect', my_on_connect)
+ listen(Pool, 'connect', my_on_connect)
# associate listener with all instances of Pool
# via the Engine class
- listen(Engine, 'on_connect', my_on_connect)
+ listen(Engine, 'connect', my_on_connect)
# associate listener with my_pool
- listen(my_pool, 'on_connect', my_on_connect)
+ listen(my_pool, 'connect', my_on_connect)
# associate listener with my_engine.pool
- listen(my_engine, 'on_connect', my_on_connect)
+ listen(my_engine, 'connect', my_on_connect)
Modifiers
----------
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
- listen(UserContact.phone, 'on_set', validate_phone, retval=True)
+ listen(UserContact.phone, 'set', validate_phone, retval=True)
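The ``validate_phone`` function itself is not shown above; a minimal
sketch, assuming the attribute holds a plain string, might normalize the
incoming value and return it, so that the returned value is what is
actually set::

    import re

    def validate_phone(target, value, oldvalue, initiator):
        """Strip non-numeric characters from a phone number"""
        return re.sub(r'\D', '', value)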
Event Reference
----------------
event.listen(
users,
- "on_after_create",
+ "after_create",
AddConstraint(constraint)
)
event.listen(
users,
- "on_before_drop",
+ "before_drop",
DropConstraint(constraint)
)
event.listen(
users,
- 'on_after_create',
+ 'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
event.listen(
users,
- 'on_before_drop',
+ 'before_drop',
DropConstraint(constraint).execute_if(dialect='postgresql')
)
event.listen(
users,
- "on_after_create",
+ "after_create",
AddConstraint(constraint).execute_if(dialect=('postgresql', 'mysql'))
)
event.listen(
users,
- "on_before_drop",
+ "before_drop",
DropConstraint(constraint).execute_if(dialect=('postgresql', 'mysql'))
)
event.listen(
users,
- "on_after_create",
+ "after_create",
AddConstraint(constraint).execute_if(callable_=should_create)
)
event.listen(
users,
- "on_before_drop",
+ "before_drop",
DropConstraint(constraint).execute_if(callable_=should_drop)
)
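The ``should_create`` and ``should_drop`` callables used above receive the
DDL element, the target, and the connection; a sketch, assuming a
PostgreSQL catalog lookup by constraint name, might be::

    def should_create(ddl, target, connection, **kw):
        row = connection.execute(
            "select conname from pg_constraint "
            "where conname='%s'" % ddl.element.name).scalar()
        return not bool(row)

    def should_drop(ddl, target, connection, **kw):
        return not should_create(ddl, target, connection, **kw)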
event.listen(
metadata,
- "on_after_create",
+ "after_create",
DDL("ALTER TABLE users ADD CONSTRAINT "
    "cst_user_name_length "
    " CHECK (length(user_name) >= 8)")
)
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
- self.on_change()
+ self.change()
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
- self.on_change()
+ self.change()
# additional dict methods would be overridden here
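Assembled, a minimal sketch of the complete mutation-tracking dictionary,
assuming the ``Mutable`` base described in this document along with a
``coerce()`` classmethod to convert plain values on assignment, might
look like::

    class MutationDict(Mutable, dict):
        @classmethod
        def coerce(cls, key, value):
            """Convert plain dictionaries to MutationDict."""
            if not isinstance(value, MutationDict):
                if isinstance(value, dict):
                    return MutationDict(value)
                return Mutable.coerce(key, value)
            return value

        def __setitem__(self, key, value):
            """Detect dictionary set events and emit change events."""
            dict.__setitem__(self, key, value)
            self.change()

        def __delitem__(self, key):
            """Detect dictionary del events and emit change events."""
            dict.__delitem__(self, key)
            self.change()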
It should be noted that insert and update defaults configured on individual
:class:`.Column` objects, such as those configured by the "default",
-"on_update", "server_default" and "server_onupdate" arguments, will continue
+"onupdate", "server_default" and "server_onupdate" arguments, will continue
to function normally even if those :class:`.Column` objects are not mapped.
This functionality is part of the SQL expression and execution system and
occurs below the level of the ORM.
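For example, in a sketch using only the SQL expression layer, with no
mapper involved, the "default" and "onupdate" arguments fire as usual
(table and column names here are illustrative)::

    import datetime
    from sqlalchemy import (MetaData, Table, Column, Integer,
                            String, DateTime, create_engine)

    metadata = MetaData()
    widgets = Table('widgets', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('updated', DateTime,
               default=datetime.datetime.now,
               onupdate=datetime.datetime.now))

    engine = create_engine('sqlite://')
    metadata.create_all(engine)

    # "default" fires on INSERT, "onupdate" fires on UPDATE,
    # entirely below the level of the ORM
    engine.execute(widgets.insert(), name='w1')
    engine.execute(widgets.update().values(name='w2'))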
def set_(instance, value, oldvalue, initiator):
instance.receive_change_event("set", key, value, oldvalue)
- event.listen(inst, 'on_append', append)
- event.listen(inst, 'on_remove', remove)
- event.listen(inst, 'on_set', set_)
+ event.listen(inst, 'append', append)
+ event.listen(inst, 'remove', remove)
+ event.listen(inst, 'set', set_)
if __name__ == '__main__':
Base = declarative_base(cls=Base)
- event.listen(Base, 'on_attribute_instrument', configure_listener)
+ event.listen(Base, 'attribute_instrument', configure_listener)
class MyMappedClass(Base):
__tablename__ = "mytable"
raise NotImplementedError()
def on_connect(self):
"""return a callable which sets up a newly created DBAPI connection.
The callable accepts a single argument "conn" which is the
"""
class EventListenerConnection(cls):
def execute(self, clauseelement, *multiparams, **params):
- for fn in dispatch.on_before_execute:
+ for fn in dispatch.before_execute:
clauseelement, multiparams, params = \
fn(self, clauseelement, multiparams, params)
ret = super(EventListenerConnection, self).\
execute(clauseelement, *multiparams, **params)
- for fn in dispatch.on_after_execute:
+ for fn in dispatch.after_execute:
fn(self, clauseelement, multiparams, params, ret)
return ret
def _before_cursor_execute(self, context, cursor,
statement, parameters):
- for fn in dispatch.on_before_cursor_execute:
+ for fn in dispatch.before_cursor_execute:
statement, parameters = \
fn(self, cursor, statement, parameters,
context, context.executemany)
def _after_cursor_execute(self, context, cursor,
statement, parameters):
- dispatch.on_after_cursor_execute(self, cursor,
+ dispatch.after_cursor_execute(self, cursor,
statement,
parameters,
context,
context.executemany)
def _begin_impl(self):
- dispatch.on_begin(self)
+ dispatch.begin(self)
return super(EventListenerConnection, self).\
_begin_impl()
def _rollback_impl(self):
- dispatch.on_rollback(self)
+ dispatch.rollback(self)
return super(EventListenerConnection, self).\
_rollback_impl()
def _commit_impl(self):
- dispatch.on_commit(self)
+ dispatch.commit(self)
return super(EventListenerConnection, self).\
_commit_impl()
def _savepoint_impl(self, name=None):
- dispatch.on_savepoint(self, name)
+ dispatch.savepoint(self, name)
return super(EventListenerConnection, self).\
_savepoint_impl(name=name)
def _rollback_to_savepoint_impl(self, name, context):
- dispatch.on_rollback_savepoint(self, name, context)
+ dispatch.rollback_savepoint(self, name, context)
return super(EventListenerConnection, self).\
_rollback_to_savepoint_impl(name, context)
def _release_savepoint_impl(self, name, context):
- dispatch.on_release_savepoint(self, name, context)
+ dispatch.release_savepoint(self, name, context)
return super(EventListenerConnection, self).\
_release_savepoint_impl(name, context)
def _begin_twophase_impl(self, xid):
- dispatch.on_begin_twophase(self, xid)
+ dispatch.begin_twophase(self, xid)
return super(EventListenerConnection, self).\
_begin_twophase_impl(xid)
def _prepare_twophase_impl(self, xid):
- dispatch.on_prepare_twophase(self, xid)
+ dispatch.prepare_twophase(self, xid)
return super(EventListenerConnection, self).\
_prepare_twophase_impl(xid)
def _rollback_twophase_impl(self, xid, is_prepared):
- dispatch.on_rollback_twophase(self, xid)
+ dispatch.rollback_twophase(self, xid)
return super(EventListenerConnection, self).\
_rollback_twophase_impl(xid, is_prepared)
def _commit_twophase_impl(self, xid, is_prepared):
- dispatch.on_commit_twophase(self, xid, is_prepared)
+ dispatch.commit_twophase(self, xid, is_prepared)
return super(EventListenerConnection, self).\
_commit_twophase_impl(xid, is_prepared)
tables = metadata.tables.values()
collection = [t for t in sql_util.sort_tables(tables) if self._can_create(t)]
- metadata.dispatch.on_before_create(metadata, self.connection,
+ metadata.dispatch.before_create(metadata, self.connection,
tables=collection)
for table in collection:
self.traverse_single(table, create_ok=True)
- metadata.dispatch.on_after_create(metadata, self.connection,
+ metadata.dispatch.after_create(metadata, self.connection,
tables=collection)
def visit_table(self, table, create_ok=False):
if not create_ok and not self._can_create(table):
return
- table.dispatch.on_before_create(table, self.connection)
+ table.dispatch.before_create(table, self.connection)
for column in table.columns:
if column.default is not None:
for index in table.indexes:
self.traverse_single(index)
- table.dispatch.on_after_create(table, self.connection)
+ table.dispatch.after_create(table, self.connection)
def visit_sequence(self, sequence):
if self.dialect.supports_sequences:
tables = metadata.tables.values()
collection = [t for t in reversed(sql_util.sort_tables(tables)) if self._can_drop(t)]
- metadata.dispatch.on_before_drop(metadata, self.connection,
+ metadata.dispatch.before_drop(metadata, self.connection,
tables=collection)
for table in collection:
self.traverse_single(table, drop_ok=True)
- metadata.dispatch.on_after_drop(metadata, self.connection,
+ metadata.dispatch.after_drop(metadata, self.connection,
tables=collection)
def _can_drop(self, table):
if not drop_ok and not self._can_drop(table):
return
- table.dispatch.on_before_drop(table, self.connection)
+ table.dispatch.before_drop(table, self.connection)
for column in table.columns:
if column.default is not None:
self.connection.execute(schema.DropTable(table))
- table.dispatch.on_after_drop(table, self.connection)
+ table.dispatch.after_drop(table, self.connection)
def visit_sequence(self, sequence):
if self.dialect.supports_sequences:
return
do_on_connect(conn)
- event.listen(pool, 'on_first_connect', on_connect)
- event.listen(pool, 'on_connect', on_connect)
+ event.listen(pool, 'first_connect', on_connect)
+ event.listen(pool, 'connect', on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection)
dialect.initialize(c)
- event.listen(pool, 'on_first_connect', first_connect)
+ event.listen(pool, 'first_connect', first_connect)
return engine
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
- def on_after_create(target, connection, **kw):
+ def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
- event.listen(some_table, "on_after_create", on_after_create)
+ event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
from sqlalchemy import DDL
event.listen(
some_table,
- "on_after_create",
+ "after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
"""
- def on_before_create(self, target, connection, **kw):
+ def before_create(self, target, connection, **kw):
"""Called before CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
"""
- def on_after_create(self, target, connection, **kw):
+ def after_create(self, target, connection, **kw):
"""Called after CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
"""
- def on_before_drop(self, target, connection, **kw):
+ def before_drop(self, target, connection, **kw):
"""Called before DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
"""
- def on_after_drop(self, target, connection, **kw):
+ def after_drop(self, target, connection, **kw):
"""Called after DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
    "handle a checkout event"
- events.listen(Pool, 'on_checkout', my_on_checkout)
+ events.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`.Pool` class and :class:`.Pool` instances,
:class:`.PoolEvents` also accepts :class:`.Engine` objects and
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
- events.listen(engine, 'on_checkout', my_on_checkout)
+ events.listen(engine, 'checkout', my_on_checkout)
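A common use for the checkout event is a connection liveness check; a
sketch, assuming a DBAPI that accepts a trivial ``SELECT 1``, raises
:class:`~sqlalchemy.exc.DisconnectionError` so that the pool discards
the connection and retries the checkout with a fresh one::

    from sqlalchemy import exc

    def ping_connection(dbapi_con, connection_rec, connection_proxy):
        cursor = dbapi_con.cursor()
        try:
            cursor.execute("SELECT 1")
        except:
            # the pool will invalidate this connection and attempt
            # the checkout again with a new connection
            raise exc.DisconnectionError()
        cursor.close()

    events.listen(engine, 'checkout', ping_connection)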
"""
else:
return target
- def on_connect(self, dbapi_connection, connection_record):
+ def connect(self, dbapi_connection, connection_record):
"""Called once for each new DB-API connection or Pool's ``creator()``.
:param dbapi_con:
"""
- def on_first_connect(self, dbapi_connection, connection_record):
+ def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first DB-API connection.
:param dbapi_con:
"""
- def on_checkout(self, dbapi_connection, connection_record, connection_proxy):
+ def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_con:
using the new connection.
"""
- def on_checkin(self, dbapi_connection, connection_record):
+ def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
import logging
from sqlalchemy import event, create_engine

log = logging.getLogger(__name__)
- def on_before_execute(conn, clauseelement, multiparams, params):
+ def before_execute(conn, clauseelement, multiparams, params):
log.info("Received statement: %s" % clauseelement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
- event.listen(engine, "on_before_execute", on_before_execute)
+ event.listen(engine, "before_execute", before_execute)
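When registered with the ``retval=True`` modifier described below, the
listener must return the statement and parameters, replaced or not; a
sketch that passes everything through unchanged::

    def rewrite_statement(conn, clauseelement, multiparams, params):
        # with retval=True, whatever is returned here is what
        # actually executes
        return clauseelement, multiparams, params

    event.listen(engine, "before_execute", rewrite_statement, retval=True)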
Some events allow modifiers to the listen() function.
- :param retval=False: Applies to the :meth:`.on_before_execute` and
- :meth:`.on_before_cursor_execute` events only. When True, the
+ :param retval=False: Applies to the :meth:`.before_execute` and
+ :meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
target.dispatch)
if not retval:
- if identifier == 'on_before_execute':
+ if identifier == 'before_execute':
orig_fn = fn
def wrap(conn, clauseelement, multiparams, params):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap
- elif identifier == 'on_before_cursor_execute':
+ elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap(conn, cursor, statement,
         parameters, context, executemany):
    orig_fn(conn, cursor, statement,
            parameters, context, executemany)
    return statement, parameters
fn = wrap
- elif retval and identifier not in ('on_before_execute', 'on_before_cursor_execute'):
+ elif retval and identifier not in ('before_execute', 'before_cursor_execute'):
raise exc.ArgumentError(
- "Only the 'on_before_execute' and "
- "'on_before_cursor_execute' engine "
+ "Only the 'before_execute' and "
+ "'before_cursor_execute' engine "
"event listeners accept the 'retval=True' "
"argument.")
event.Events._listen(target, identifier, fn)
- def on_before_execute(self, conn, clauseelement, multiparams, params):
+ def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events."""
- def on_after_execute(self, conn, clauseelement, multiparams, params, result):
+ def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events."""
- def on_before_cursor_execute(self, conn, cursor, statement,
+ def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events."""
- def on_after_cursor_execute(self, conn, cursor, statement,
+ def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events."""
- def on_begin(self, conn):
+ def begin(self, conn):
"""Intercept begin() events."""
- def on_rollback(self, conn):
+ def rollback(self, conn):
"""Intercept rollback() events."""
- def on_commit(self, conn):
+ def commit(self, conn):
"""Intercept commit() events."""
- def on_savepoint(self, conn, name=None):
+ def savepoint(self, conn, name=None):
"""Intercept savepoint() events."""
- def on_rollback_savepoint(self, conn, name, context):
+ def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events."""
- def on_release_savepoint(self, conn, name, context):
+ def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events."""
- def on_begin_twophase(self, conn, xid):
+ def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events."""
- def on_prepare_twophase(self, conn, xid):
+ def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events."""
- def on_rollback_twophase(self, conn, xid, is_prepared):
+ def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events."""
- def on_commit_twophase(self, conn, xid, is_prepared):
+ def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events."""
return weakref.WeakKeyDictionary()
- def on_change(self):
+ def change(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
key = attribute.key
parent_cls = attribute.class_
- def on_load(state):
+ def load(state):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
state.dict[key] = val
val._parents[state.obj()] = key
- def on_set(target, value, oldvalue, initiator):
+ def set_(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
oldvalue._parents.pop(state.obj(), None)
return value
- event.listen(parent_cls, 'on_load', on_load, raw=True)
- event.listen(parent_cls, 'on_refresh', on_load, raw=True)
- event.listen(attribute, 'on_set', on_set, raw=True, retval=True)
+ event.listen(parent_cls, 'load', load, raw=True)
+ event.listen(parent_cls, 'refresh', load, raw=True)
+ event.listen(attribute, 'set', set_, raw=True, retval=True)
# TODO: need a deserialize hook here
cls.associate_with_attribute(getattr(class_, prop.key))
break
- event.listen(mapper, 'on_mapper_configured', listen_for_type)
+ event.listen(mapper, 'mapper_configured', listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
break
- event.listen(mapper, 'on_mapper_configured', listen_for_type)
+ event.listen(mapper, 'mapper_configured', listen_for_type)
return sqltype
Composite classes, in addition to meeting the usage contract
defined in :ref:`mapper_composite`, also define some system
- of relaying change events to the given :meth:`.on_change`
+ of relaying change events to the given :meth:`.change`
method, which will notify all parents of the change. Below
the special Python method ``__setattr__`` is used to intercept
all changes::
def __setattr__(self, key, value):
object.__setattr__(self, key, value)
- self.on_change()
+ self.change()
def __composite_values__(self):
return self.x, self.y
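A fuller sketch of such a composite class, assuming the
``MutableComposite`` base described here and the ``Point`` example from
:ref:`mapper_composite`, might read::

    class Point(MutableComposite):
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __setattr__(self, key, value):
            """Intercept all attribute sets and notify parents."""
            object.__setattr__(self, key, value)
            self.change()

        def __composite_values__(self):
            return self.x, self.y

        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and \
                other.y == self.y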
return weakref.WeakKeyDictionary()
- def on_change(self):
+ def change(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
key = attribute.key
parent_cls = attribute.class_
- def on_load(state):
+ def load(state):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
if val is not None:
val._parents[state.obj()] = key
- def on_set(target, value, oldvalue, initiator):
+ def set_(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
oldvalue._parents.pop(state.obj(), None)
return value
- event.listen(parent_cls, 'on_load', on_load, raw=True)
- event.listen(parent_cls, 'on_refresh', on_load, raw=True)
- event.listen(attribute, 'on_set', on_set, raw=True, retval=True)
+ event.listen(parent_cls, 'load', load, raw=True)
+ event.listen(parent_cls, 'refresh', load, raw=True)
+ event.listen(attribute, 'set', set_, raw=True, retval=True)
# TODO: need a deserialize hook here
if hasattr(prop, 'composite_class') and issubclass(prop.composite_class, cls):
cls._listen_on_attribute(getattr(class_, prop.key))
- event.listen(mapper, 'on_mapper_configured', listen_for_type)
+ event.listen(mapper, 'mapper_configured', listen_for_type)
listener = util.as_interface(listener, methods=('connect',
'first_connect', 'checkout', 'checkin'))
if hasattr(listener, 'connect'):
- event.listen(self, 'on_connect', listener.connect)
+ event.listen(self, 'connect', listener.connect)
if hasattr(listener, 'first_connect'):
- event.listen(self, 'on_first_connect', listener.first_connect)
+ event.listen(self, 'first_connect', listener.first_connect)
if hasattr(listener, 'checkout'):
- event.listen(self, 'on_checkout', listener.checkout)
+ event.listen(self, 'checkout', listener.checkout)
if hasattr(listener, 'checkin'):
- event.listen(self, 'on_checkin', listener.checkin)
+ event.listen(self, 'checkin', listener.checkin)
def connect(self, dbapi_con, con_record):
clauseelement, *multiparams,
**params)
- event.listen(self, 'on_before_execute', adapt_execute)
+ event.listen(self, 'before_execute', adapt_execute)
def adapt_cursor_execute(conn, cursor, statement,
                         parameters, context, executemany):
executemany,
)
- event.listen(self, 'on_before_cursor_execute', adapt_cursor_execute)
+ event.listen(self, 'before_cursor_execute', adapt_cursor_execute)
def do_nothing_callback(*arg, **kw):
pass
return util.update_wrapper(go, fn)
- event.listen(self, 'on_begin', adapt_listener(listener.begin))
- event.listen(self, 'on_rollback',
+ event.listen(self, 'begin', adapt_listener(listener.begin))
+ event.listen(self, 'rollback',
adapt_listener(listener.rollback))
- event.listen(self, 'on_commit', adapt_listener(listener.commit))
- event.listen(self, 'on_savepoint',
+ event.listen(self, 'commit', adapt_listener(listener.commit))
+ event.listen(self, 'savepoint',
adapt_listener(listener.savepoint))
- event.listen(self, 'on_rollback_savepoint',
+ event.listen(self, 'rollback_savepoint',
adapt_listener(listener.rollback_savepoint))
- event.listen(self, 'on_release_savepoint',
+ event.listen(self, 'release_savepoint',
adapt_listener(listener.release_savepoint))
- event.listen(self, 'on_begin_twophase',
+ event.listen(self, 'begin_twophase',
adapt_listener(listener.begin_twophase))
- event.listen(self, 'on_prepare_twophase',
+ event.listen(self, 'prepare_twophase',
adapt_listener(listener.prepare_twophase))
- event.listen(self, 'on_rollback_twophase',
+ event.listen(self, 'rollback_twophase',
adapt_listener(listener.rollback_twophase))
- event.listen(self, 'on_commit_twophase',
+ event.listen(self, 'commit_twophase',
adapt_listener(listener.commit_twophase))
else:
old = dict_.get(self.key, NO_VALUE)
- if self.dispatch.on_remove:
+ if self.dispatch.remove:
self.fire_remove_event(state, dict_, old, None)
state.modified_event(dict_, self, old)
del dict_[self.key]
else:
old = dict_.get(self.key, NO_VALUE)
- if self.dispatch.on_set:
+ if self.dispatch.set:
value = self.fire_replace_event(state, dict_,
value, old, initiator)
state.modified_event(dict_, self, old)
dict_[self.key] = value
def fire_replace_event(self, state, dict_, value, previous, initiator):
- for fn in self.dispatch.on_set:
+ for fn in self.dispatch.set:
value = fn(state, value, previous, initiator or self)
return value
def fire_remove_event(self, state, dict_, value, initiator):
- for fn in self.dispatch.on_remove:
+ for fn in self.dispatch.remove:
fn(state, value, initiator or self)
@property
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), False)
- for fn in self.dispatch.on_remove:
+ for fn in self.dispatch.remove:
fn(state, value, initiator or self)
state.modified_event(dict_, self, value)
previous is not PASSIVE_NO_RESULT):
self.sethasparent(instance_state(previous), False)
- for fn in self.dispatch.on_set:
+ for fn in self.dispatch.set:
value = fn(state, value, previous, initiator or self)
state.modified_event(dict_, self, previous)
def fire_append_event(self, state, dict_, value, initiator):
- for fn in self.dispatch.on_append:
+ for fn in self.dispatch.append:
value = fn(state, value, initiator or self)
state.modified_event(dict_, self, NEVER_SET, True)
if self.trackparent and value is not None:
self.sethasparent(instance_state(value), False)
- for fn in self.dispatch.on_remove:
+ for fn in self.dispatch.remove:
fn(state, value, initiator or self)
state.modified_event(dict_, self, NEVER_SET, True)
passive=PASSIVE_NO_FETCH)
if uselist:
- event.listen(attribute, "on_append", append, retval=True, raw=True)
+ event.listen(attribute, "append", append, retval=True, raw=True)
else:
- event.listen(attribute, "on_set", set_, retval=True, raw=True)
+ event.listen(attribute, "set", set_, retval=True, raw=True)
# TODO: need coverage in test/orm/ of remove event
- event.listen(attribute, "on_remove", remove, retval=True, raw=True)
+ event.listen(attribute, "remove", remove, retval=True, raw=True)
class History(tuple):
"""A 3-tuple of added, unchanged and deleted values,
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator,
- internally_instrumented, on_link) indicate the method's purpose and take no
+ internally_instrumented, link) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
return fn
@staticmethod
- def on_link(fn):
+ def link(fn):
"""Tag the method as the "linked to attribute" event handler.
This optional event handler will be called when the collection class
is linked to or unlinked from the InstrumentedAttribute. It is
invoked immediately after the '_sa_adapter' property is set on
the instance. A single argument is passed: the collection adapter
that has been linked, or None if unlinking.
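As a sketch, a custom collection class might apply this decorator to a
method that is notified as the collection is linked and unlinked
(``MyList`` and ``_on_link`` are illustrative names)::

    class MyList(list):
        @collection.link
        def _on_link(self, adapter):
            # the collection adapter just linked,
            # or None if unlinking
            self.adapter = adapter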
"""
- setattr(fn, '_sa_instrument_role', 'on_link')
+ setattr(fn, '_sa_instrument_role', 'link')
return fn
@staticmethod
if hasattr(method, '_sa_instrument_role'):
role = method._sa_instrument_role
assert role in ('appender', 'remover', 'iterator',
- 'on_link', 'converter')
+ 'link', 'converter')
roles[role] = name
# transfer instrumentation requests from decorated function
def reconstruct(instance):
ls_meth(self, instance)
return reconstruct
- event.listen(self.class_manager, 'on_load',
+ event.listen(self.class_manager, 'load',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_instance':
def go(ls_meth):
self.class_manager.original_init,
instance, args, kwargs)
return init_instance
- event.listen(self.class_manager, 'on_init',
+ event.listen(self.class_manager, 'init',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_failed':
def go(ls_meth):
instance, args, kwargs)
return init_failed
- event.listen(self.class_manager, 'on_init_failure',
+ event.listen(self.class_manager, 'init_failure',
go(ls_meth), raw=False, propagate=True)
else:
- event.listen(self, "on_%s" % meth, ls_meth,
+ event.listen(self, meth, ls_meth,
raw=False, retval=True, propagate=True)
@classmethod
def _adapt_listener(cls, self, listener):
- event.listen(self, 'on_before_commit', listener.before_commit)
- event.listen(self, 'on_after_commit', listener.after_commit)
- event.listen(self, 'on_after_rollback', listener.after_rollback)
- event.listen(self, 'on_before_flush', listener.before_flush)
- event.listen(self, 'on_after_flush', listener.after_flush)
- event.listen(self, 'on_after_flush_postexec', listener.after_flush_postexec)
- event.listen(self, 'on_after_begin', listener.after_begin)
- event.listen(self, 'on_after_attach', listener.after_attach)
- event.listen(self, 'on_after_bulk_update', listener.after_bulk_update)
- event.listen(self, 'on_after_bulk_delete', listener.after_bulk_delete)
+ event.listen(self, 'before_commit', listener.before_commit)
+ event.listen(self, 'after_commit', listener.after_commit)
+ event.listen(self, 'after_rollback', listener.after_rollback)
+ event.listen(self, 'before_flush', listener.before_flush)
+ event.listen(self, 'after_flush', listener.after_flush)
+ event.listen(self, 'after_flush_postexec', listener.after_flush_postexec)
+ event.listen(self, 'after_begin', listener.after_begin)
+ event.listen(self, 'after_attach', listener.after_attach)
+ event.listen(self, 'after_bulk_update', listener.after_bulk_update)
+ event.listen(self, 'after_bulk_delete', listener.after_bulk_delete)
def before_commit(self, session):
"""Execute right before commit is called.
@classmethod
def _adapt_listener(cls, self, listener):
- event.listen(self, 'on_append', listener.append,
+ event.listen(self, 'append', listener.append,
active_history=listener.active_history,
raw=True, retval=True)
- event.listen(self, 'on_remove', listener.remove,
+ event.listen(self, 'remove', listener.remove,
active_history=listener.active_history,
raw=True, retval=True)
- event.listen(self, 'on_set', listener.set,
+ event.listen(self, 'set', listener.set,
active_history=listener.active_history,
raw=True, retval=True)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
- for fn in attr.dispatch.on_set:
+ for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
- attr.dispatch.on_remove(state, previous, attr.impl)
+ attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self._attribute_keys]
)
- event.listen(self.parent, 'on_after_insert',
+ event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
- event.listen(self.parent, 'on_after_update',
+ event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
- event.listen(self.parent, 'on_load', load_handler, raw=True)
- event.listen(self.parent, 'on_refresh', load_handler, raw=True)
- event.listen(self.parent, "on_expire", expire_handler, raw=True)
+ event.listen(self.parent, 'load', load_handler, raw=True)
+ event.listen(self.parent, 'refresh', load_handler, raw=True)
+ event.listen(self.parent, "expire", expire_handler, raw=True)
# TODO: need a deserialize hook here
collection_history = self._modified_event(state, dict_)
collection_history.added_items.append(value)
- for fn in self.dispatch.on_append:
+ for fn in self.dispatch.append:
value = fn(state, value, initiator or self)
if self.trackparent and value is not None:
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), False)
- for fn in self.dispatch.on_remove:
+ for fn in self.dispatch.remove:
fn(state, value, initiator or self)
def _modified_event(self, state, dict_):
def _remove(cls, identifier, target, fn):
raise NotImplementedError("Removal of instrumentation events not yet implemented")
- def on_class_instrument(self, cls):
+ def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
"""
- def on_class_uninstrument(self, cls):
+ def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
"""
- def on_attribute_instrument(self, cls, key, inst):
+ def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class InstanceEvents(event.Events):
def _remove(cls, identifier, target, fn):
raise NotImplementedError("Removal of instance events not yet implemented")
- def on_first_init(self, manager, cls):
+ def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is constructed.
"""
- def on_init(self, target, args, kwargs):
+ def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
"""
- def on_init_failure(self, target, args, kwargs):
+ def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and has raised an exception.
"""
- def on_load(self, target):
+ def load(self, target):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
"""
- def on_refresh(self, target):
+ def refresh(self, target):
"""Receive an object instance after one or more attributes have
been refreshed.
"""
- def on_expire(self, target, keys):
+ def expire(self, target, keys):
"""Receive an object instance after its attributes or some subset
have been expired.
"""
- def on_resurrect(self, target):
+ def resurrect(self, target):
"""Receive an object instance as it is 'resurrected' from
garbage collection, which occurs when a "dirty" state falls
out of scope."""
% target.special_number)
# associate the listener function with SomeMappedClass,
- # to execute during the "on_before_insert" hook
- event.listen(SomeMappedClass, 'on_before_insert', my_before_insert_listener)
+ # to execute during the "before_insert" hook
+ event.listen(SomeMappedClass, 'before_insert', my_before_insert_listener)
Available targets include mapped classes, instances of
:class:`.Mapper` (i.e. returned by :func:`.mapper`,
log.debug("Instance %s being inserted" % target)
# attach to all mappers
- event.listen(mapper, 'on_before_insert', some_listener)
+ event.listen(mapper, 'before_insert', some_listener)
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
- persistence methods :meth:`~.MapperEvents.on_before_insert`,
- and :meth:`~.MapperEvents.on_before_update` are popular
+ persistence methods :meth:`~.MapperEvents.before_insert`
+ and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted; however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
- :meth:`.SessionEvents.on_before_flush` and
- :meth:`.SessionEvents.on_after_flush` methods as more
+ :meth:`.SessionEvents.before_flush` and
+ :meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
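For example, a sketch of a :meth:`.SessionEvents.before_flush` listener
that stamps a hypothetical ``updated_at`` attribute on each dirty object
during the flush::

    import datetime
    from sqlalchemy import event
    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker()

    def stamp_dirty(session, flush_context, instances):
        for obj in session.dirty:
            if hasattr(obj, 'updated_at'):
                obj.updated_at = datetime.datetime.now()

    event.listen(Session, 'before_flush', stamp_dirty)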
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners,
- such as :meth:`~.MapperEvents.on_translate_row` or
- :meth:`~.MapperEvents.on_create_instance`.
+ such as :meth:`~.MapperEvents.translate_row` or
+ :meth:`~.MapperEvents.create_instance`.
"""
else:
event.Events._listen(target, identifier, fn)
- def on_instrument_class(self, mapper, class_):
+ def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
"""
- def on_mapper_configured(self, mapper, class_):
+ def mapper_configured(self, mapper, class_):
"""Called when the mapper for the class is fully configured.
This event is the latest phase of mapper construction.
"""
# TODO: need coverage for this event
- def on_translate_row(self, mapper, context, row):
+ def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
"""
- def on_create_instance(self, mapper, context, row, class_):
+ def create_instance(self, mapper, context, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
"""
- def on_append_result(self, mapper, context, row, target,
+ def append_result(self, mapper, context, row, target,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
"""
- def on_populate_instance(self, mapper, context, row,
+ def populate_instance(self, mapper, context, row,
target, **flags):
"""Receive an instance before that instance has
its attributes populated.
Most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
- :meth:`.InstanceEvents.on_load`.
+ :meth:`.InstanceEvents.load`.
:param mapper: the :class:`.Mapper` which is the target
of this event.
"""
- def on_before_insert(self, mapper, connection, target):
+ def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
"""
- def on_after_insert(self, mapper, connection, target):
+ def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
"""
- def on_before_update(self, mapper, connection, target):
+ def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
- being sent to :meth:`~.MapperEvents.on_before_update` is
+ being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
:return: No return value is supported by this event.
"""
- def on_after_update(self, mapper, connection, target):
+ def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
- being sent to :meth:`~.MapperEvents.on_after_update` is
+ being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
"""
- def on_before_delete(self, mapper, connection, target):
+ def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
"""
- def on_after_delete(self, mapper, connection, target):
+ def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
Session = sessionmaker()
- event.listen(Session, "on_before_commit", my_before_commit)
+ event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
def _remove(cls, identifier, target, fn):
raise NotImplementedError("Removal of session events not yet implemented")
- def on_before_commit(self, session):
+ def before_commit(self, session):
"""Execute before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
- def on_after_commit(self, session):
+ def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
- def on_after_rollback(self, session):
+ def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
- def on_before_flush( self, session, flush_context, instances):
+ def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
- def on_after_flush(self, session, flush_context):
+ def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes."""
- def on_after_flush_postexec(self, session, flush_context):
+ def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
- def on_after_begin( self, session, transaction, connection):
+ def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
- def on_after_attach(self, session, instance):
+ def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
- def on_after_bulk_update( self, session, query, query_context, result):
+ def after_bulk_update(self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`result` is the result object returned from the bulk operation.
"""
- def on_after_bulk_delete( self, session, query, query_context, result):
+ def after_bulk_delete(self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
- event.listen(MyClass.collection, 'on_append', my_append_listener)
+ event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
- listen(UserContact.phone, 'on_set', validate_phone, retval=True)
+ listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :class:`ValueError` to halt the operation.
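For example, a sketch of a validator that rejects rather than corrects,
assuming phone numbers must contain at least ten digits::

    import re

    def validate_phone(target, value, oldvalue, initiator):
        """Raise ValueError for phone numbers with fewer than ten digits."""
        if len(re.sub(r'\D', '', value)) < 10:
            raise ValueError("phone number is too short")
        return value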
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
- "on_set" event would like to receive the "old" value being
+ "set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
def _remove(cls, identifier, target, fn):
raise NotImplementedError("Removal of attribute events not yet implemented")
- def on_append(self, target, value, initiator):
+ def append(self, target, value, initiator):
"""Receive a collection append event.
:param target: the object instance receiving the event.
"""
- def on_remove(self, target, value, initiator):
+ def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
:return: No return value is defined for this event.
"""
- def on_set(self, target, value, oldvalue, initiator):
+ def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
@util.memoized_property
def _state_constructor(self):
- self.dispatch.on_first_init(self, self.class_)
+ self.dispatch.first_init(self, self.class_)
if self.mutable_attributes:
return state.MutableAttrInstanceState
else:
def post_configure_attribute(self, key):
instrumentation_registry.dispatch.\
- on_attribute_instrument(self.class_, key, self[key])
+ attribute_instrument(self.class_, key, self[key])
def uninstrument_attribute(self, key, propagated=False):
if key not in self:
self._state_finders[class_] = manager.state_getter()
self._dict_finders[class_] = manager.dict_getter()
- self.dispatch.on_class_instrument(class_)
+ self.dispatch.class_instrument(class_)
return manager
def unregister(self, class_):
if class_ in self._manager_finders:
manager = self.manager_of_class(class_)
- self.dispatch.on_class_uninstrument(class_)
+ self.dispatch.class_uninstrument(class_)
manager.unregister()
manager.dispose()
del self._manager_finders[class_]
_mapper_registry[self] = True
- self.dispatch.on_instrument_class(self, self.class_)
+ self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_,
if manager.info.get(_INSTRUMENTOR, False):
return
- event.listen(manager, 'on_first_init', _event_on_first_init, raw=True)
- event.listen(manager, 'on_init', _event_on_init, raw=True)
- event.listen(manager, 'on_resurrect', _event_on_resurrect, raw=True)
+ event.listen(manager, 'first_init', _event_on_first_init, raw=True)
+ event.listen(manager, 'init', _event_on_init, raw=True)
+ event.listen(manager, 'resurrect', _event_on_resurrect, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
if hasattr(method, '__sa_reconstructor__'):
self._reconstructor = method
- event.listen(manager, 'on_load', _event_on_load, raw=True)
+ event.listen(manager, 'load', _event_on_load, raw=True)
elif hasattr(method, '__sa_validators__'):
for name in method.__sa_validators__:
self._validators[name] = method
# call before_XXX extensions
if not has_identity:
- mapper.dispatch.on_before_insert(mapper, conn, state)
+ mapper.dispatch.before_insert(mapper, conn, state)
else:
- mapper.dispatch.on_before_update(mapper, conn, state)
+ mapper.dispatch.before_update(mapper, conn, state)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# call after_XXX extensions
if not has_identity:
- mapper.dispatch.on_after_insert(mapper, connection, state)
+ mapper.dispatch.after_insert(mapper, connection, state)
else:
- mapper.dispatch.on_after_update(mapper, connection, state)
+ mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(self, uowtransaction, table,
state, dict_, prefetch_cols, postfetch_cols,
else:
conn = connection
- mapper.dispatch.on_before_delete(mapper, conn, state)
+ mapper.dispatch.before_delete(mapper, conn, state)
tups.append((state,
state.dict,
)
for state, state_dict, mapper, has_identity, connection in tups:
- mapper.dispatch.on_after_delete(mapper, connection, state)
+ mapper.dispatch.after_delete(mapper, connection, state)
def _instance_processor(self, context, path, reduced_path, adapter,
polymorphic_from=None,
listeners = self.dispatch
- translate_row = listeners.on_translate_row or None
- create_instance = listeners.on_create_instance or None
- populate_instance = listeners.on_populate_instance or None
- append_result = listeners.on_append_result or None
+ translate_row = listeners.translate_row or None
+ create_instance = listeners.create_instance or None
+ populate_instance = listeners.populate_instance or None
+ append_result = listeners.append_result or None
populate_existing = context.populate_existing or self.always_refresh
if self.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
populate_state(state, dict_, row, isnew, attrs)
if loaded_instance:
- state.manager.dispatch.on_load(state)
+ state.manager.dispatch.load(state)
elif isnew:
- state.manager.dispatch.on_refresh(state)
+ state.manager.dispatch.refresh(state)
if result is not None:
if append_result:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
- mapper.dispatch.on_mapper_configured(mapper, mapper.class_)
+ mapper.dispatch.mapper_configured(mapper, mapper.class_)
except:
exc = sys.exc_info()[1]
if not hasattr(exc, '_configure_failed'):
filter = None
custom_rows = single_entity and \
- self._entities[0].mapper.dispatch.on_append_result
+ self._entities[0].mapper.dispatch.append_result
(process, labels) = \
zip(*[
)
)
- session.dispatch.on_after_bulk_delete(session, self, context, result)
+ session.dispatch.after_bulk_delete(session, self, context, result)
return result.rowcount
[_attr_as_key(k) for k in values]
)
- session.dispatch.on_after_bulk_update(session, self, context, result)
+ session.dispatch.after_bulk_update(session, self, context, result)
return result.rowcount
self._connections[conn] = self._connections[conn.engine] = \
(conn, transaction, conn is not bind)
- self.session.dispatch.on_after_begin(self.session, self, conn)
+ self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
def _prepare_impl(self):
self._assert_is_active()
if self._parent is None or self.nested:
- self.session.dispatch.on_before_commit(self.session)
+ self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for t in set(self._connections.values()):
t[1].commit()
- self.session.dispatch.on_after_commit(self.session)
+ self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
if self.session._enable_transaction_accounting:
self._restore_snapshot()
- self.session.dispatch.on_after_rollback(self.session)
+ self.session.dispatch.after_rollback(self.session)
def _deactivate(self):
self._active = False
merged_state.commit_all(merged_dict, self.identity_map)
if new_instance:
- merged_state.manager.dispatch.on_load(merged_state)
+ merged_state.manager.dispatch.load(merged_state)
return merged
@classmethod
if state.session_id != self.hash_key:
state.session_id = self.hash_key
- if self.dispatch.on_after_attach:
- self.dispatch.on_after_attach(self, state.obj())
+ if self.dispatch.after_attach:
+ self.dispatch.after_attach(self, state.obj())
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
flush_context = UOWTransaction(self)
- if self.dispatch.on_before_flush:
- self.dispatch.on_before_flush(self, flush_context, objects)
+ if self.dispatch.before_flush:
+ self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
try:
flush_context.execute()
- self.dispatch.on_after_flush(self, flush_context)
+ self.dispatch.after_flush(self, flush_context)
transaction.commit()
except:
transaction.rollback(_capture_exception=True)
# self.identity_map._modified.difference(objects)
#self.identity_map._modified.clear()
- self.dispatch.on_after_flush_postexec(self, flush_context)
+ self.dispatch.after_flush_postexec(self, flush_context)
def is_modified(self, instance, include_collections=True, passive=False):
"""Return ``True`` if instance has modified attributes.
self, instance, args = mixed[0], mixed[1], mixed[2:]
manager = self.manager
- manager.dispatch.on_init(self, args, kwargs)
+ manager.dispatch.init(self, args, kwargs)
#if manager.mutable_attributes:
# assert self.__class__ is MutableAttrInstanceState
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
- manager.dispatch.on_init_failure(self, args, kwargs)
+ manager.dispatch.init_failure(self, args, kwargs)
raise
def get_history(self, key, **kwargs):
self.callables[key] = self
dict_.pop(key, None)
- self.manager.dispatch.on_expire(self, None)
+ self.manager.dispatch.expire(self, None)
def expire_attributes(self, dict_, attribute_names):
pending = self.__dict__.get('pending', None)
if pending:
pending.pop(key, None)
- self.manager.dispatch.on_expire(self, attribute_names)
+ self.manager.dispatch.expire(self, attribute_names)
def __call__(self, passive):
"""__call__ allows the InstanceState to act as a deferred
obj.__dict__.update(self.mutable_dict)
# re-establishes identity attributes from the key
- self.manager.dispatch.on_resurrect(self)
+ self.manager.dispatch.resurrect(self)
return obj
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
- event.listen(desc, 'on_append', append, raw=True, retval=True, active_history=True)
- event.listen(desc, 'on_set', set_, raw=True, retval=True, active_history=True)
+ event.listen(desc, 'append', append, raw=True, retval=True, active_history=True)
+ event.listen(desc, 'set', set_, raw=True, retval=True, active_history=True)
sess.expunge(oldvalue)
return newvalue
- event.listen(descriptor, 'on_append', append, raw=True, retval=True)
- event.listen(descriptor, 'on_remove', remove, raw=True, retval=True)
- event.listen(descriptor, 'on_set', set_, raw=True, retval=True)
+ event.listen(descriptor, 'append', append, raw=True, retval=True)
+ event.listen(descriptor, 'remove', remove, raw=True, retval=True)
+ event.listen(descriptor, 'set', set_, raw=True, retval=True)
class UOWTransaction(object):
def set_(state, value, oldvalue, initiator):
return validator(state.obj(), key, value)
- event.listen(desc, 'on_append', append, raw=True, retval=True)
- event.listen(desc, 'on_set', set_, raw=True, retval=True)
+ event.listen(desc, 'append', append, raw=True, retval=True)
+ event.listen(desc, 'set', set_, raw=True, retval=True)
def polymorphic_union(table_map, typecolname, aliasname='p_union'):
"""Create a ``UNION`` statement used by a polymorphic mapper.
self.connection = self.__connect()
self.info = {}
- pool.dispatch.on_first_connect.exec_once(self.connection, self)
- pool.dispatch.on_connect(self.connection, self)
+ pool.dispatch.first_connect.exec_once(self.connection, self)
+ pool.dispatch.connect(self.connection, self)
def close(self):
if self.connection is not None:
if self.connection is None:
self.connection = self.__connect()
self.info.clear()
- if self.__pool.dispatch.on_connect:
- self.__pool.dispatch.on_connect(self.connection, self)
+ if self.__pool.dispatch.connect:
+ self.__pool.dispatch.connect(self.connection, self)
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
self.__close()
self.connection = self.__connect()
self.info.clear()
- if self.__pool.dispatch.on_connect:
- self.__pool.dispatch.on_connect(self.connection, self)
+ if self.__pool.dispatch.connect:
+ self.__pool.dispatch.connect(self.connection, self)
return self.connection
def __close(self):
if echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
- if pool.dispatch.on_checkin:
- pool.dispatch.on_checkin(connection, connection_record)
+ if pool.dispatch.checkin:
+ pool.dispatch.checkin(connection, connection_record)
pool._return_conn(connection_record)
_refs = set()
raise exc.InvalidRequestError("This connection is closed")
self.__counter += 1
- if not self._pool.dispatch.on_checkout or self.__counter != 1:
+ if not self._pool.dispatch.checkout or self.__counter != 1:
return self
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
- self._pool.dispatch.on_checkout(self.connection,
+ self._pool.dispatch.checkout(self.connection,
self._connection_record,
self)
return self
def adapt_listener(target, connection, **kw):
listener(event_name, target, connection, **kw)
- event.listen(self, "on_" + event_name.replace('-', '_'), adapt_listener)
+ event.listen(self, event_name.replace('-', '_'), adapt_listener)
def _set_parent(self, metadata):
metadata._add_table(self.name, self.schema, self)
return table in set(kw['tables']) and \
bind.dialect.supports_alter
- event.listen(table.metadata, "on_after_create", AddConstraint(self, on=supports_alter))
- event.listen(table.metadata, "on_before_drop", DropConstraint(self, on=supports_alter))
+ event.listen(table.metadata, "after_create", AddConstraint(self, on=supports_alter))
+ event.listen(table.metadata, "before_drop", DropConstraint(self, on=supports_alter))
def copy(self, **kw):
def adapt_listener(target, connection, **kw):
listener(event, target, connection, **kw)
- event.listen(self, "on_" + event_name.replace('-', '_'), adapt_listener)
+ event.listen(self, event_name.replace('-', '_'), adapt_listener)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
event.listen(
users,
- 'on_after_create',
+ 'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
target, connection, **kw):
return connection.execute(self.against(target))
- event.listen(target, "on_" + event_name.replace('-', '_'), call_event)
+ event.listen(target, event_name.replace('-', '_'), call_event)
@expression._generative
def against(self, target):
event.listen(
metadata,
- 'on_before_create',
+ 'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
- event.listen(tbl, 'on_before_create', DDL('DROP TRIGGER users_trigger'))
+ event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
- event.listen(tbl, 'on_after_create', spow.execute_if(dialect='somedb'))
+ event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
def setUp(self):
global Target
- assert 'on_event_one' not in event._registrars
- assert 'on_event_two' not in event._registrars
+ assert 'event_one' not in event._registrars
+ assert 'event_two' not in event._registrars
class TargetEvents(event.Events):
- def on_event_one(self, x, y):
+ def event_one(self, x, y):
pass
- def on_event_two(self, x):
+ def event_two(self, x):
pass
class Target(object):
def listen(x, y):
pass
- event.listen(Target, "on_event_one", listen)
+ event.listen(Target, "event_one", listen)
- eq_(len(Target().dispatch.on_event_one), 1)
- eq_(len(Target().dispatch.on_event_two), 0)
+ eq_(len(Target().dispatch.event_one), 1)
+ eq_(len(Target().dispatch.event_two), 0)
def test_register_instance(self):
def listen(x, y):
pass
t1 = Target()
- event.listen(t1, "on_event_one", listen)
+ event.listen(t1, "event_one", listen)
- eq_(len(Target().dispatch.on_event_one), 0)
- eq_(len(t1.dispatch.on_event_one), 1)
- eq_(len(Target().dispatch.on_event_two), 0)
- eq_(len(t1.dispatch.on_event_two), 0)
+ eq_(len(Target().dispatch.event_one), 0)
+ eq_(len(t1.dispatch.event_one), 1)
+ eq_(len(Target().dispatch.event_two), 0)
+ eq_(len(t1.dispatch.event_two), 0)
def test_register_class_instance(self):
def listen_one(x, y):
def listen_two(x, y):
pass
- event.listen(Target, "on_event_one", listen_one)
+ event.listen(Target, "event_one", listen_one)
t1 = Target()
- event.listen(t1, "on_event_one", listen_two)
+ event.listen(t1, "event_one", listen_two)
- eq_(len(Target().dispatch.on_event_one), 1)
- eq_(len(t1.dispatch.on_event_one), 2)
- eq_(len(Target().dispatch.on_event_two), 0)
- eq_(len(t1.dispatch.on_event_two), 0)
+ eq_(len(Target().dispatch.event_one), 1)
+ eq_(len(t1.dispatch.event_one), 2)
+ eq_(len(Target().dispatch.event_two), 0)
+ eq_(len(t1.dispatch.event_two), 0)
def listen_three(x, y):
pass
- event.listen(Target, "on_event_one", listen_three)
- eq_(len(Target().dispatch.on_event_one), 2)
- eq_(len(t1.dispatch.on_event_one), 3)
+ event.listen(Target, "event_one", listen_three)
+ eq_(len(Target().dispatch.event_one), 2)
+ eq_(len(t1.dispatch.event_one), 3)
class TestAcceptTargets(TestBase):
"""Test default target acceptance."""
global TargetOne, TargetTwo
class TargetEventsOne(event.Events):
- def on_event_one(self, x, y):
+ def event_one(self, x, y):
pass
class TargetEventsTwo(event.Events):
- def on_event_one(self, x, y):
+ def event_one(self, x, y):
pass
class TargetOne(object):
def listen_four(x, y):
pass
- event.listen(TargetOne, "on_event_one", listen_one)
- event.listen(TargetTwo, "on_event_one", listen_two)
+ event.listen(TargetOne, "event_one", listen_one)
+ event.listen(TargetTwo, "event_one", listen_two)
eq_(
- list(TargetOne().dispatch.on_event_one),
+ list(TargetOne().dispatch.event_one),
[listen_one]
)
eq_(
- list(TargetTwo().dispatch.on_event_one),
+ list(TargetTwo().dispatch.event_one),
[listen_two]
)
t1 = TargetOne()
t2 = TargetTwo()
- event.listen(t1, "on_event_one", listen_three)
- event.listen(t2, "on_event_one", listen_four)
+ event.listen(t1, "event_one", listen_three)
+ event.listen(t2, "event_one", listen_four)
eq_(
- list(t1.dispatch.on_event_one),
+ list(t1.dispatch.event_one),
[listen_one, listen_three]
)
eq_(
- list(t2.dispatch.on_event_one),
+ list(t2.dispatch.event_one),
[listen_two, listen_four]
)
else:
return None
- def on_event_one(self, x, y):
+ def event_one(self, x, y):
pass
class Target(object):
def listen(x, y):
pass
- event.listen("one", "on_event_one", listen)
+ event.listen("one", "event_one", listen)
eq_(
- list(Target().dispatch.on_event_one),
+ list(Target().dispatch.event_one),
[listen]
)
assert_raises(
exc.InvalidRequestError,
event.listen,
- listen, "on_event_one", Target
+ listen, "event_one", Target
)
class TestListenOverride(TestBase):
event.Events._listen(target, identifier, adapt)
- def on_event_one(self, x, y):
+ def event_one(self, x, y):
pass
class Target(object):
def listen_two(x, y):
result.append((x, y))
- event.listen(Target, "on_event_one", listen_one, add=True)
- event.listen(Target, "on_event_one", listen_two)
+ event.listen(Target, "event_one", listen_one, add=True)
+ event.listen(Target, "event_one", listen_two)
t1 = Target()
- t1.dispatch.on_event_one(5, 7)
- t1.dispatch.on_event_one(10, 5)
+ t1.dispatch.event_one(5, 7)
+ t1.dispatch.event_one(10, 5)
eq_(result,
[
global Target
class TargetEvents(event.Events):
- def on_event_one(self, arg):
+ def event_one(self, arg):
pass
- def on_event_two(self, arg):
+ def event_two(self, arg):
pass
class Target(object):
t1 = Target()
- event.listen(t1, "on_event_one", listen_one, propagate=True)
- event.listen(t1, "on_event_two", listen_two)
+ event.listen(t1, "event_one", listen_one, propagate=True)
+ event.listen(t1, "event_two", listen_two)
t2 = Target()
t2.dispatch._update(t1.dispatch)
- t2.dispatch.on_event_one(t2, 1)
- t2.dispatch.on_event_two(t2, 2)
+ t2.dispatch.event_one(t2, 1)
+ t2.dispatch.event_two(t2, 2)
eq_(result, [(t2, 1)])
cursor.execute("set sql_mode='%s'" % (",".join(modes)))
e = engines.testing_engine(options={
'pool_events':[
- (connect, 'on_first_connect'),
- (connect, 'on_connect')
+ (connect, 'first_connect'),
+ (connect, 'connect')
]
})
return e
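As the ``exec_once`` call earlier in this section shows, ``first_connect`` fires only for the first connection a pool produces, while ``connect`` fires for every newly created DBAPI connection. A sketch distinguishing the two, assuming a fresh pool instance ``p``::

    from sqlalchemy import event

    def announce_first(dbapi_con, connection_record):
        # runs once per pool, on the very first connection
        print "pool initialized"

    def announce_each(dbapi_con, connection_record):
        # runs for each new DBAPI connection the pool creates
        print "new DBAPI connection"

    event.listen(p, 'first_connect', announce_first)
    event.listen(p, 'connect', announce_each)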
def test_table_create_before(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_before_create', canary.before_create)
+ event.listen(table, 'before_create', canary.before_create)
table.create(bind)
assert canary.state == 'before-create'
def test_table_create_after(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_after_create', canary.after_create)
+ event.listen(table, 'after_create', canary.after_create)
canary.state = 'skipped'
table.create(bind)
def test_table_create_both(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_before_create', canary.before_create)
- event.listen(table, 'on_after_create', canary.after_create)
+ event.listen(table, 'before_create', canary.before_create)
+ event.listen(table, 'after_create', canary.after_create)
table.create(bind)
assert canary.state == 'after-create'
def test_table_drop_before(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_before_drop', canary.before_drop)
+ event.listen(table, 'before_drop', canary.before_drop)
table.create(bind)
assert canary.state is None
def test_table_drop_after(self):
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_after_drop', canary.after_drop)
+ event.listen(table, 'after_drop', canary.after_drop)
table.create(bind)
assert canary.state is None
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_before_drop', canary.before_drop)
- event.listen(table, 'on_after_drop', canary.after_drop)
+ event.listen(table, 'before_drop', canary.before_drop)
+ event.listen(table, 'after_drop', canary.after_drop)
table.create(bind)
assert canary.state is None
table, bind = self.table, self.bind
canary = self.Canary(table, bind)
- event.listen(table, 'on_before_create', canary.before_create)
- event.listen(table, 'on_after_create', canary.after_create)
- event.listen(table, 'on_before_drop', canary.before_drop)
- event.listen(table, 'on_after_drop', canary.after_drop)
+ event.listen(table, 'before_create', canary.before_create)
+ event.listen(table, 'after_create', canary.after_create)
+ event.listen(table, 'before_drop', canary.before_drop)
+ event.listen(table, 'after_drop', canary.after_drop)
assert canary.state is None
table.create(bind)
def test_table_create_before(self):
metadata, bind = self.metadata, self.bind
canary = self.Canary(metadata, bind)
- event.listen(metadata, 'on_before_create', canary.before_create)
+ event.listen(metadata, 'before_create', canary.before_create)
metadata.create_all(bind)
assert canary.state == 'before-create'
def test_metadata_create_after(self):
metadata, bind = self.metadata, self.bind
canary = self.Canary(metadata, bind)
- event.listen(metadata, 'on_after_create', canary.after_create)
+ event.listen(metadata, 'after_create', canary.after_create)
canary.state = 'skipped'
metadata.create_all(bind)
metadata, bind = self.metadata, self.bind
canary = self.Canary(metadata, bind)
- event.listen(metadata, 'on_before_create', canary.before_create)
- event.listen(metadata, 'on_after_create', canary.after_create)
+ event.listen(metadata, 'before_create', canary.before_create)
+ event.listen(metadata, 'after_create', canary.after_create)
metadata.create_all(bind)
assert canary.state == 'after-create'
metadata, table, bind = self.metadata, self.table, self.bind
table_canary = self.Canary(table, bind)
- event.listen(table, 'on_before_create', table_canary.before_create)
+ event.listen(table, 'before_create', table_canary.before_create)
metadata_canary = self.Canary(metadata, bind)
- event.listen(metadata, 'on_before_create', metadata_canary.before_create)
+ event.listen(metadata, 'before_create', metadata_canary.before_create)
self.table.create(self.bind)
assert metadata_canary.state == None
def test_table_standalone(self):
users, engine = self.users, self.engine
- event.listen(users, 'on_before_create', DDL('mxyzptlk'))
- event.listen(users, 'on_after_create', DDL('klptzyxm'))
- event.listen(users, 'on_before_drop', DDL('xyzzy'))
- event.listen(users, 'on_after_drop', DDL('fnord'))
+ event.listen(users, 'before_create', DDL('mxyzptlk'))
+ event.listen(users, 'after_create', DDL('klptzyxm'))
+ event.listen(users, 'before_drop', DDL('xyzzy'))
+ event.listen(users, 'after_drop', DDL('fnord'))
users.create()
strings = [str(x) for x in engine.mock]
def test_table_by_metadata(self):
metadata, users, engine = self.metadata, self.users, self.engine
- event.listen(users, 'on_before_create', DDL('mxyzptlk'))
- event.listen(users, 'on_after_create', DDL('klptzyxm'))
- event.listen(users, 'on_before_drop', DDL('xyzzy'))
- event.listen(users, 'on_after_drop', DDL('fnord'))
+ event.listen(users, 'before_create', DDL('mxyzptlk'))
+ event.listen(users, 'after_create', DDL('klptzyxm'))
+ event.listen(users, 'before_drop', DDL('xyzzy'))
+ event.listen(users, 'after_drop', DDL('fnord'))
metadata.create_all()
strings = [str(x) for x in engine.mock]
def test_metadata(self):
metadata, engine = self.metadata, self.engine
- event.listen(metadata, 'on_before_create', DDL('mxyzptlk'))
- event.listen(metadata, 'on_after_create', DDL('klptzyxm'))
- event.listen(metadata, 'on_before_drop', DDL('xyzzy'))
- event.listen(metadata, 'on_after_drop', DDL('fnord'))
+ event.listen(metadata, 'before_create', DDL('mxyzptlk'))
+ event.listen(metadata, 'after_create', DDL('klptzyxm'))
+ event.listen(metadata, 'before_drop', DDL('xyzzy'))
+ event.listen(metadata, 'after_drop', DDL('fnord'))
metadata.create_all()
strings = [str(x) for x in engine.mock]
event.listen(
users,
- 'on_after_create',
+ 'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql'),
)
event.listen(
users,
- 'on_before_drop',
+ 'before_drop',
DropConstraint(constraint).execute_if(dialect='postgresql'),
)
engines.testing_engine(options=dict(implicit_returning=False,
strategy='threadlocal'))
]:
- event.listen(engine, 'on_before_execute', execute)
- event.listen(engine, 'on_before_cursor_execute', cursor_execute)
+ event.listen(engine, 'before_execute', execute)
+ event.listen(engine, 'before_cursor_execute', cursor_execute)
m = MetaData(engine)
t1 = Table('t1', m,
def test_options(self):
canary = []
- def on_execute(conn, *args, **kw):
+ def execute(conn, *args, **kw):
canary.append('execute')
- def on_cursor_execute(conn, *args, **kw):
+ def cursor_execute(conn, *args, **kw):
canary.append('cursor_execute')
engine = engines.testing_engine()
- event.listen(engine, 'on_before_execute', on_execute)
- event.listen(engine, 'on_before_cursor_execute', on_cursor_execute)
+ event.listen(engine, 'before_execute', execute)
+ event.listen(engine, 'before_cursor_execute', cursor_execute)
conn = engine.connect()
c2 = conn.execution_options(foo='bar')
eq_(c2._execution_options, {'foo':'bar'})
canary.append(name)
return go
- def on_execute(conn, clauseelement, multiparams, params):
+ def execute(conn, clauseelement, multiparams, params):
canary.append('execute')
return clauseelement, multiparams, params
- def on_cursor_execute(conn, cursor, statement,
+ def cursor_execute(conn, cursor, statement,
parameters, context, executemany):
canary.append('cursor_execute')
return statement, parameters
assert_raises(
tsa.exc.ArgumentError,
- event.listen, engine, "on_begin", tracker("on_begin"), retval=True
+ event.listen, engine, "begin", tracker("begin"), retval=True
)
- event.listen(engine, "on_before_execute", on_execute, retval=True)
- event.listen(engine, "on_before_cursor_execute", on_cursor_execute, retval=True)
+ event.listen(engine, "before_execute", execute, retval=True)
+ event.listen(engine, "before_cursor_execute", cursor_execute, retval=True)
engine.execute(select([1]))
eq_(
canary, ['execute', 'cursor_execute']
return go
engine = engines.testing_engine()
- event.listen(engine, 'on_before_execute', tracker('execute'))
- event.listen(engine, 'on_before_cursor_execute', tracker('cursor_execute'))
- event.listen(engine, 'on_begin', tracker('begin'))
- event.listen(engine, 'on_commit', tracker('commit'))
- event.listen(engine, 'on_rollback', tracker('rollback'))
+ event.listen(engine, 'before_execute', tracker('execute'))
+ event.listen(engine, 'before_cursor_execute', tracker('cursor_execute'))
+ event.listen(engine, 'begin', tracker('begin'))
+ event.listen(engine, 'commit', tracker('commit'))
+ event.listen(engine, 'rollback', tracker('rollback'))
conn = engine.connect()
trans = conn.begin()
'rollback_savepoint', 'release_savepoint',
'rollback', 'begin_twophase',
'prepare_twophase', 'commit_twophase']:
- event.listen(engine, 'on_%s' % name, tracker(name))
+ event.listen(engine, name, tracker(name))
conn = engine.connect()
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
- def on_first_connect(*arg, **kw):
+ def first_connect(*arg, **kw):
canary.append('first_connect')
- event.listen(p, 'on_first_connect', on_first_connect)
+ event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
- def on_connect(*arg, **kw):
+ def connect(*arg, **kw):
canary.append('connect')
- event.listen(p, 'on_connect', on_connect)
+ event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
- def on_checkout(*arg, **kw):
+ def checkout(*arg, **kw):
canary.append('checkout')
- event.listen(p, 'on_checkout', on_checkout)
+ event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
- def on_checkin(*arg, **kw):
+ def checkin(*arg, **kw):
canary.append('checkin')
- event.listen(p, 'on_checkin', on_checkin)
+ event.listen(p, 'checkin', checkin)
return p, canary
canary.append("listen_four")
engine = create_engine(testing.db.url)
- event.listen(pool.Pool, 'on_connect', listen_one)
- event.listen(engine.pool, 'on_connect', listen_two)
- event.listen(engine, 'on_connect', listen_three)
- event.listen(engine.__class__, 'on_connect', listen_four)
+ event.listen(pool.Pool, 'connect', listen_one)
+ event.listen(engine.pool, 'connect', listen_two)
+ event.listen(engine, 'connect', listen_three)
+ event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
def listen_three(*args):
canary.append("listen_three")
- event.listen(pool.Pool, 'on_connect', listen_one)
- event.listen(pool.QueuePool, 'on_connect', listen_two)
- event.listen(pool.SingletonThreadPool, 'on_connect', listen_three)
+ event.listen(pool.Pool, 'connect', listen_one)
+ event.listen(pool.QueuePool, 'connect', listen_two)
+ event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
- assert listen_one in p1.dispatch.on_connect
- assert listen_two in p1.dispatch.on_connect
- assert listen_three not in p1.dispatch.on_connect
- assert listen_one in p2.dispatch.on_connect
- assert listen_two not in p2.dispatch.on_connect
- assert listen_three in p2.dispatch.on_connect
+ assert listen_one in p1.dispatch.connect
+ assert listen_two in p1.dispatch.connect
+ assert listen_three not in p1.dispatch.connect
+ assert listen_one in p2.dispatch.connect
+ assert listen_two not in p2.dispatch.connect
+ assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
- self.assert_(len(instance.dispatch.on_connect) == conn)
- self.assert_(len(instance.dispatch.on_first_connect) == fconn)
- self.assert_(len(instance.dispatch.on_checkout) == cout)
- self.assert_(len(instance.dispatch.on_checkin) == cin)
+ self.assert_(len(instance.dispatch.connect) == conn)
+ self.assert_(len(instance.dispatch.first_connect) == fconn)
+ self.assert_(len(instance.dispatch.checkout) == cout)
+ self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
- eq_(len(instance.dispatch.on_connect), conn)
- eq_(len(instance.dispatch.on_checkout), cout)
- eq_(len(instance.dispatch.on_checkin), cin)
+ eq_(len(instance.dispatch.connect), conn)
+ eq_(len(instance.dispatch.checkout), cout)
+ eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
- self.on_change()
+ self.change()
def __delitem__(self, key):
dict.__delitem__(self, key)
- self.on_change()
+ self.change()
return MutationDict
@testing.resolve_artifact_names
def __setattr__(self, key, value):
object.__setattr__(self, key, value)
- self.on_change()
+ self.change()
def __composite_values__(self):
return self.x, self.y
options = options or config.db_opts
engine = create_engine(url, **options)
- event.listen(engine, 'on_after_execute', asserter.execute)
- event.listen(engine, 'on_after_cursor_execute', asserter.cursor_execute)
- event.listen(engine.pool, 'on_checkout', testing_reaper.checkout)
+ event.listen(engine, 'after_execute', asserter.execute)
+ event.listen(engine, 'after_cursor_execute', asserter.cursor_execute)
+ event.listen(engine.pool, 'checkout', testing_reaper.checkout)
# may want to call this, results
# in first-connect initializers
class Bar(object):
pass
- def on_append(state, child, initiator):
+ def append(state, child, initiator):
b2 = Bar()
b2.data = b1.data + " appended"
return b2
attributes.register_attribute(Foo, 'barset', typecallable=set, uselist=True, useobject=True)
attributes.register_attribute(Bar, 'data', uselist=False, useobject=False)
- event.listen(Foo.data, 'on_set', on_set, retval=True)
- event.listen(Foo.barlist, 'on_append', on_append, retval=True)
- event.listen(Foo.barset, 'on_append', on_append, retval=True)
+ event.listen(Foo.data, 'set', on_set, retval=True)
+ event.listen(Foo.barlist, 'append', append, retval=True)
+ event.listen(Foo.barset, 'append', append, retval=True)
f1 = Foo()
f1.data = "some data"
def attr_c():
attributes.register_attribute(classes[2], 'attrib', uselist=False, useobject=False)
- def on_set(state, value, oldvalue, initiator):
+ def set(state, value, oldvalue, initiator):
canary.append(value)
def events_a():
- event.listen(classes[0].attrib, 'on_set', on_set, propagate=True)
+ event.listen(classes[0].attrib, 'set', set, propagate=True)
def teardown():
classes[:] = [None, None, None]
bind.engine.name not in ('oracle', 'mssql', 'sqlite')
),
):
- event.listen(dt, 'on_after_create', ins)
+ event.listen(dt, 'after_create', ins)
- event.listen(dt, 'on_before_drop', sa.DDL("DROP TRIGGER dt_ins"))
+ event.listen(dt, 'before_drop', sa.DDL("DROP TRIGGER dt_ins"))
for up in (
sa.DDL("CREATE TRIGGER dt_up AFTER UPDATE ON dt "
bind.engine.name not in ('oracle', 'mssql', 'sqlite')
),
):
- event.listen(dt, 'on_after_create', up)
+ event.listen(dt, 'after_create', up)
- event.listen(dt, 'on_before_drop', sa.DDL("DROP TRIGGER dt_up"))
+ event.listen(dt, 'before_drop', sa.DDL("DROP TRIGGER dt_up"))
@classmethod
instrumentation.register_class(cls)
ne_(cls.__init__, original_init)
manager = instrumentation.manager_of_class(cls)
- def on_init(state, args, kwargs):
- canary.append((cls, 'on_init', state.class_))
- event.listen(manager, 'on_init', on_init, raw=True)
+ def init(state, args, kwargs):
+ canary.append((cls, 'init', state.class_))
+ event.listen(manager, 'init', init, raw=True)
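The ``raw=True`` flag used here asks that the listener receive the underlying ``InstanceState`` rather than the mapped instance itself; the object, when needed, can be recovered from the state. A sketch, assuming ``state.obj()`` as the accessor::

    def init(state, args, kwargs):
        # with raw=True the first argument is the InstanceState;
        # state.obj() yields the actual mapped instance
        instance = state.obj()
        print "initializing:", instance

    event.listen(manager, 'init', init, raw=True)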
def test_ai(self):
inits = []
self.register(A, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A)])
+ eq_(inits, [(A, 'init', A)])
def test_Ai(self):
inits = []
self.register(A, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
def test_ai_B(self):
inits = []
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (A, '__init__')])
def test_ai_Bi(self):
inits = []
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (B, '__init__'), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')])
def test_Ai_bi(self):
inits = []
super(B, self).__init__()
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, '__init__'), (A, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(B, '__init__'), (A, 'init', B), (A, '__init__')])
def test_Ai_Bi(self):
inits = []
self.register(B, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (B, '__init__'), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')])
def test_Ai_B(self):
inits = []
self.register(B, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (A, '__init__')])
def test_Ai_Bi_Ci(self):
inits = []
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (B, '__init__'), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (C, '__init__'), (B, '__init__'),
+ eq_(inits, [(C, 'init', C), (C, '__init__'), (B, '__init__'),
(A, '__init__')])
def test_Ai_bi_Ci(self):
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, '__init__'), (A, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(B, '__init__'), (A, 'init', B), (A, '__init__')])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (C, '__init__'), (B, '__init__'),
+ eq_(inits, [(C, 'init', C), (C, '__init__'), (B, '__init__'),
(A, '__init__')])
def test_Ai_b_Ci(self):
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(A, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(A, 'init', B), (A, '__init__')])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (C, '__init__'), (A, '__init__')])
+ eq_(inits, [(C, 'init', C), (C, '__init__'), (A, '__init__')])
def test_Ai_B_Ci(self):
inits = []
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (A, '__init__')])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (C, '__init__'), (A, '__init__')])
+ eq_(inits, [(C, 'init', C), (C, '__init__'), (A, '__init__')])
def test_Ai_B_C(self):
inits = []
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A), (A, '__init__')])
+ eq_(inits, [(A, 'init', A), (A, '__init__')])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (A, '__init__')])
+ eq_(inits, [(B, 'init', B), (A, '__init__')])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (A, '__init__')])
+ eq_(inits, [(C, 'init', C), (A, '__init__')])
def test_A_Bi_C(self):
inits = []
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A)])
+ eq_(inits, [(A, 'init', A)])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B), (B, '__init__')])
+ eq_(inits, [(B, 'init', B), (B, '__init__')])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (B, '__init__')])
+ eq_(inits, [(C, 'init', C), (B, '__init__')])
def test_A_B_Ci(self):
inits = []
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A)])
+ eq_(inits, [(A, 'init', A)])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B)])
+ eq_(inits, [(B, 'init', B)])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C), (C, '__init__')])
+ eq_(inits, [(C, 'init', C), (C, '__init__')])
def test_A_B_C(self):
inits = []
self.register(C, inits)
obj = A()
- eq_(inits, [(A, 'on_init', A)])
+ eq_(inits, [(A, 'init', A)])
del inits[:]
obj = B()
- eq_(inits, [(B, 'on_init', B)])
+ eq_(inits, [(B, 'init', B)])
del inits[:]
obj = C()
- eq_(inits, [(C, 'on_init', C)])
+ eq_(inits, [(C, 'init', C)])
def test_defaulted_init(self):
class X(object):
assert_raises_message(TypeError, "multiple instrumentation implementations", instrumentation.register_class, B1)
class OnLoadTest(_base.ORMTest):
- """Check that Events.on_load is not hit in regular attributes operations."""
+ """Check that Events.load is not hit in regular attributes operations."""
def test_basic(self):
import pickle
try:
instrumentation.register_class(A)
manager = instrumentation.manager_of_class(A)
- event.listen(manager, 'on_load', canary)
+ event.listen(manager, 'load', canary)
a = A()
p_a = pickle.dumps(a)
mapper(A, users)
mapper(B, addresses, inherits=A)
- def on_init_a(target, args, kwargs):
- canary.append(('on_init_a', target))
+ def init_a(target, args, kwargs):
+ canary.append(('init_a', target))
- def on_init_b(target, args, kwargs):
- canary.append(('on_init_b', target))
+ def init_b(target, args, kwargs):
+ canary.append(('init_b', target))
- def on_init_c(target, args, kwargs):
- canary.append(('on_init_c', target))
+ def init_c(target, args, kwargs):
+ canary.append(('init_c', target))
- def on_init_d(target, args, kwargs):
- canary.append(('on_init_d', target))
+ def init_d(target, args, kwargs):
+ canary.append(('init_d', target))
- def on_init_e(target, args, kwargs):
- canary.append(('on_init_e', target))
+ def init_e(target, args, kwargs):
+ canary.append(('init_e', target))
- event.listen(mapper, 'on_init', on_init_a)
- event.listen(Mapper, 'on_init', on_init_b)
- event.listen(class_mapper(A), 'on_init', on_init_c)
- event.listen(A, 'on_init', on_init_d)
- event.listen(A, 'on_init', on_init_e, propagate=True)
+ event.listen(mapper, 'init', init_a)
+ event.listen(Mapper, 'init', init_b)
+ event.listen(class_mapper(A), 'init', init_c)
+ event.listen(A, 'init', init_d)
+ event.listen(A, 'init', init_e, propagate=True)
a = A()
- eq_(canary, [('on_init_a', a),('on_init_b', a),
- ('on_init_c', a),('on_init_d', a),('on_init_e', a)])
+ eq_(canary, [('init_a', a), ('init_b', a),
+ ('init_c', a), ('init_d', a), ('init_e', a)])
# test propagate flag
canary[:] = []
b = B()
- eq_(canary, [('on_init_a', b), ('on_init_b', b),('on_init_e', b)])
+ eq_(canary, [('init_a', b), ('init_b', b), ('init_e', b)])
def teardown(self):
# TODO: need to get remove() functionality
return go
for meth in [
- 'on_init',
- 'on_init_failure',
- 'on_translate_row',
- 'on_create_instance',
- 'on_append_result',
- 'on_populate_instance',
- 'on_load',
- 'on_refresh',
- 'on_expire',
- 'on_before_insert',
- 'on_after_insert',
- 'on_before_update',
- 'on_after_update',
- 'on_before_delete',
- 'on_after_delete'
+ 'init',
+ 'init_failure',
+ 'translate_row',
+ 'create_instance',
+ 'append_result',
+ 'populate_instance',
+ 'load',
+ 'refresh',
+ 'expire',
+ 'before_insert',
+ 'after_insert',
+ 'before_update',
+ 'after_update',
+ 'before_delete',
+ 'after_delete'
]:
event.listen(mapper, meth, evt(meth), **kw)
return canary
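Each of the mapper-level events in the list above can also be listened for individually under its bare name. A minimal sketch of a ``before_insert`` hook, assuming a mapped ``User`` class with a hypothetical ``created_at`` column::

    import datetime

    from sqlalchemy import event

    def stamp_created(mapper, connection, target):
        # populate the timestamp just before the INSERT is emitted
        target.created_at = datetime.datetime.utcnow()

    event.listen(User, 'before_insert', stamp_created)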
sess.delete(u)
sess.flush()
eq_(canary,
- ['on_init', 'on_before_insert',
- 'on_after_insert', 'on_expire', 'on_translate_row', 'on_populate_instance',
- 'on_refresh',
- 'on_append_result', 'on_translate_row', 'on_create_instance',
- 'on_populate_instance', 'on_load', 'on_append_result',
- 'on_before_update', 'on_after_update', 'on_before_delete', 'on_after_delete'])
+ ['init', 'before_insert',
+ 'after_insert', 'expire', 'translate_row', 'populate_instance',
+ 'refresh',
+ 'append_result', 'translate_row', 'create_instance',
+ 'populate_instance', 'load', 'append_result',
+ 'before_update', 'after_update', 'before_delete', 'after_delete'])
@testing.resolve_artifact_names
def test_inheritance(self):
sess.flush()
sess.delete(am)
sess.flush()
- eq_(canary1, ['on_init', 'on_before_insert', 'on_after_insert',
- 'on_translate_row', 'on_populate_instance','on_refresh',
- 'on_append_result', 'on_translate_row', 'on_create_instance'
- , 'on_populate_instance', 'on_load', 'on_append_result',
- 'on_before_update', 'on_after_update', 'on_before_delete',
- 'on_after_delete'])
+ eq_(canary1, ['init', 'before_insert', 'after_insert',
+ 'translate_row', 'populate_instance', 'refresh',
+ 'append_result', 'translate_row', 'create_instance',
+ 'populate_instance', 'load', 'append_result',
+ 'before_update', 'after_update', 'before_delete',
+ 'after_delete'])
eq_(canary2, [])
- eq_(canary3, ['on_init', 'on_before_insert', 'on_after_insert',
- 'on_translate_row', 'on_populate_instance','on_refresh',
- 'on_append_result', 'on_translate_row', 'on_create_instance'
- , 'on_populate_instance', 'on_load', 'on_append_result',
- 'on_before_update', 'on_after_update', 'on_before_delete',
- 'on_after_delete'])
+ eq_(canary3, ['init', 'before_insert', 'after_insert',
+ 'translate_row', 'populate_instance', 'refresh',
+ 'append_result', 'translate_row', 'create_instance',
+ 'populate_instance', 'load', 'append_result',
+ 'before_update', 'after_update', 'before_delete',
+ 'after_delete'])
@testing.resolve_artifact_names
def test_before_after_only_collection(self):
- """on_before_update is called on parent for collection modifications,
- on_after_update is called even if no columns were updated.
+ """before_update is called on parent for collection modifications,
+ after_update is called even if no columns were updated.
"""
sess.add(k1)
sess.flush()
eq_(canary1,
- ['on_init',
- 'on_before_insert', 'on_after_insert'])
+ ['init',
+ 'before_insert', 'after_insert'])
eq_(canary2,
- ['on_init',
- 'on_before_insert', 'on_after_insert'])
+ ['init',
+ 'before_insert', 'after_insert'])
canary1[:]= []
canary2[:]= []
i1.keywords.append(k1)
sess.flush()
- eq_(canary1, ['on_before_update', 'on_after_update'])
+ eq_(canary1, ['before_update', 'after_update'])
eq_(canary2, [])
return u
mapper(User, users)
- event.listen(User, 'on_create_instance', create_instance, retval=True)
+ event.listen(User, 'create_instance', create_instance, retval=True)
sess = create_session()
u1 = User()
u1.name = 'ed'
@testing.resolve_artifact_names
def test_instrument_event(self):
canary = []
- def on_instrument_class(mapper, cls):
+ def instrument_class(mapper, cls):
canary.append(cls)
- event.listen(Mapper, 'on_instrument_class', on_instrument_class)
+ event.listen(Mapper, 'instrument_class', instrument_class)
mapper(User, users)
eq_(canary, [User])
run_inserts = None
- def on_load_tracker(self, cls, canary=None):
+ def load_tracker(self, cls, canary=None):
if canary is None:
def canary(instance):
canary.called += 1
canary.called = 0
- event.listen(cls, 'on_load', canary)
+ event.listen(cls, 'load', canary)
return canary
def test_transient_to_pending(self):
mapper(User, users)
sess = create_session()
- on_load = self.on_load_tracker(User)
+ load = self.load_tracker(User)
u = User(id=7, name='fred')
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
u2 = sess.merge(u)
- eq_(on_load.called, 1)
+ eq_(load.called, 1)
assert u2 in sess
eq_(u2, User(id=7, name='fred'))
sess.flush()
'addresses': relationship(Address, backref='user',
collection_class=OrderedSet)})
mapper(Address, addresses)
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
sess = create_session()
sess.merge(u)
- eq_(on_load.called, 3)
+ eq_(load.called, 3)
merged_users = [e for e in sess if isinstance(e, User)]
eq_(len(merged_users), 1)
@testing.resolve_artifact_names
def test_transient_to_persistent(self):
mapper(User, users)
- on_load = self.on_load_tracker(User)
+ load = self.load_tracker(User)
sess = create_session()
u = User(id=7, name='fred')
sess.flush()
sess.expunge_all()
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
_u2 = u2 = User(id=7, name='fred jones')
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
u2 = sess.merge(u2)
assert u2 is not _u2
- eq_(on_load.called, 1)
+ eq_(load.called, 1)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(), User(id=7, name='fred jones'))
- eq_(on_load.called, 2)
+ eq_(load.called, 2)
@testing.resolve_artifact_names
def test_transient_to_persistent_collection(self):
})
mapper(Address, addresses)
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
sess.flush()
sess.expunge_all()
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
# 2.,3. merges Address ids 3 & 4, saves into session.
# 4.,5. loads pre-existing elements in "addresses" collection,
# marks as deleted, Address ids 1 and 2.
- eq_(on_load.called, 5)
+ eq_(load.called, 5)
eq_(u,
User(id=7, name='fred', addresses=OrderedSet([
order_by=addresses.c.id,
collection_class=OrderedSet)})
mapper(Address, addresses)
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
a = Address(id=1, email_address='fred1')
u = User(id=7, name='fred', addresses=OrderedSet([
u.addresses.add(Address(id=3, email_address='fred3'))
u.addresses.remove(a)
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
u = sess.merge(u)
- eq_(on_load.called, 4)
+ eq_(load.called, 4)
sess.flush()
sess.expunge_all()
'addresses':relationship(mapper(Address, addresses),
cascade="all", backref="user")
})
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
sess = create_session()
u = User(id=7, name='fred')
u.addresses.append(a2)
u2 = sess.merge(u)
- eq_(on_load.called, 3)
+ eq_(load.called, 3)
eq_(u,
User(id=7, name='fred', addresses=[
eq_(u2, User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
- eq_(on_load.called, 6)
+ eq_(load.called, 6)
@testing.resolve_artifact_names
def test_merge_empty_attributes(self):
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses), backref='user')
})
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
sess = create_session()
u.name = 'fred2'
u.addresses[1].email_address = 'hoho@lalala.com'
- eq_(on_load.called, 3)
+ eq_(load.called, 3)
# new session, merge modified data into session
sess3 = create_session()
u3 = sess3.merge(u)
- eq_(on_load.called, 6)
+ eq_(load.called, 6)
# ensure local changes are pending
eq_(u3, User(id=7, name='fred2', addresses=[
eq_(u, User(id=7, name='fred2', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@lalala.com')]))
- eq_(on_load.called, 9)
+ eq_(load.called, 9)
# merge persistent object into another session
sess4 = create_session()
sess4.flush()
# no changes; therefore flush should do nothing
self.assert_sql_count(testing.db, go, 0)
- eq_(on_load.called, 12)
+ eq_(load.called, 12)
# test with "dontload" merge
sess5 = create_session()
# but also, load=False wipes out any difference in committed state,
# so no flush at all
self.assert_sql_count(testing.db, go, 0)
- eq_(on_load.called, 15)
+ eq_(load.called, 15)
sess4 = create_session()
u = sess4.merge(u, load=False)
sess4.flush()
# the 'afafds' email_address change flushes
self.assert_sql_count(testing.db, go, 1)
- eq_(on_load.called, 18)
+ eq_(load.called, 18)
sess5 = create_session()
u2 = sess5.query(User).get(u.id)
eq_(u2.name, 'fred2')
eq_(u2.addresses[1].email_address, 'afafds')
- eq_(on_load.called, 21)
+ eq_(load.called, 21)
@testing.resolve_artifact_names
def test_no_relationship_cascade(self):
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses))})
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
sess = create_session()
u = User(name='fred')
sess.add(u)
sess.flush()
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
sess2 = create_session()
u2 = sess2.query(User).get(u.id)
- eq_(on_load.called, 1)
+ eq_(load.called, 1)
u.addresses[1].email_address = 'addr 2 modified'
sess2.merge(u)
eq_(u2.addresses[1].email_address, 'addr 2 modified')
- eq_(on_load.called, 3)
+ eq_(load.called, 3)
sess3 = create_session()
u3 = sess3.query(User).get(u.id)
- eq_(on_load.called, 4)
+ eq_(load.called, 4)
u.name = 'also fred'
sess3.merge(u)
- eq_(on_load.called, 6)
+ eq_(load.called, 6)
eq_(u3.name, 'also fred')
@testing.resolve_artifact_names
mapper(Order, orders, properties={
'items':relationship(mapper(Item, items), secondary=order_items)})
- on_load = self.on_load_tracker(Order)
- self.on_load_tracker(Item, on_load)
+ load = self.load_tracker(Order)
+ self.load_tracker(Item, load)
sess = create_session()
sess.add(o)
sess.flush()
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
sess2 = create_session()
o2 = sess2.query(Order).get(o.id)
- eq_(on_load.called, 1)
+ eq_(load.called, 1)
o.items[1].description = 'item 2 modified'
sess2.merge(o)
eq_(o2.items[1].description, 'item 2 modified')
- eq_(on_load.called, 3)
+ eq_(load.called, 3)
sess3 = create_session()
o3 = sess3.query(Order).get(o.id)
- eq_( on_load.called, 4)
+ eq_( load.called, 4)
o.description = 'desc modified'
sess3.merge(o)
- eq_(on_load.called, 6)
+ eq_(load.called, 6)
eq_(o3.description, 'desc modified')
@testing.resolve_artifact_names
mapper(User, users, properties={
'address':relationship(mapper(Address, addresses),uselist = False)
})
- on_load = self.on_load_tracker(User)
- self.on_load_tracker(Address, on_load)
+ load = self.load_tracker(User)
+ self.load_tracker(Address, load)
sess = create_session()
u = User()
sess.add(u)
sess.flush()
- eq_(on_load.called, 0)
+ eq_(load.called, 0)
sess2 = create_session()
u2 = sess2.query(User).get(7)
- eq_(on_load.called, 1)
+ eq_(load.called, 1)
u2.name = 'fred2'
u2.address.email_address = 'hoho@lalala.com'
- eq_(on_load.called, 2)
+ eq_(load.called, 2)
u3 = sess.merge(u2)
- eq_(on_load.called, 2)
+ eq_(load.called, 2)
assert u3 is u
@testing.resolve_artifact_names
def my_listener(*arg, **kw):
pass
- event.listen(Session, 'on_before_flush', my_listener)
+ event.listen(Session, 'before_flush', my_listener)
s = Session()
- assert my_listener in s.dispatch.on_before_flush
+ assert my_listener in s.dispatch.before_flush
def test_sessionmaker_listen(self):
"""test that listen can be applied to individual scoped_session() classes."""
S1 = sessionmaker()
S2 = sessionmaker()
- event.listen(Session, 'on_before_flush', my_listener_one)
- event.listen(S1, 'on_before_flush', my_listener_two)
+ event.listen(Session, 'before_flush', my_listener_one)
+ event.listen(S1, 'before_flush', my_listener_two)
s1 = S1()
- assert my_listener_one in s1.dispatch.on_before_flush
- assert my_listener_two in s1.dispatch.on_before_flush
+ assert my_listener_one in s1.dispatch.before_flush
+ assert my_listener_two in s1.dispatch.before_flush
s2 = S2()
- assert my_listener_one in s2.dispatch.on_before_flush
- assert my_listener_two not in s2.dispatch.on_before_flush
+ assert my_listener_one in s2.dispatch.before_flush
+ assert my_listener_two not in s2.dispatch.before_flush
def test_scoped_session_invalid_callable(self):
from sqlalchemy.orm import scoped_session
sa.exc.ArgumentError,
"Session event listen on a ScopedSession "
"requries that its creation callable is a Session subclass.",
- event.listen, scope, "on_before_flush", my_listener_one
+ event.listen, scope, "before_flush", my_listener_one
)
def test_scoped_session_invalid_class(self):
sa.exc.ArgumentError,
"Session event listen on a ScopedSession "
"requries that its creation callable is a Session subclass.",
- event.listen, scope, "on_before_flush", my_listener_one
+ event.listen, scope, "before_flush", my_listener_one
)
def test_scoped_session_listen(self):
pass
scope = scoped_session(sessionmaker())
- event.listen(scope, "on_before_flush", my_listener_one)
+ event.listen(scope, "before_flush", my_listener_one)
- assert my_listener_one in scope().dispatch.on_before_flush
+ assert my_listener_one in scope().dispatch.before_flush
def _listener_fixture(self, **kw):
canary = []
sess = Session(**kw)
for evt in [
- 'on_before_commit',
- 'on_after_commit',
- 'on_after_rollback',
- 'on_before_flush',
- 'on_after_flush',
- 'on_after_flush_postexec',
- 'on_after_begin',
- 'on_after_attach',
- 'on_after_bulk_update',
- 'on_after_bulk_delete'
+ 'before_commit',
+ 'after_commit',
+ 'after_rollback',
+ 'before_flush',
+ 'after_flush',
+ 'after_flush_postexec',
+ 'after_begin',
+ 'after_attach',
+ 'after_bulk_update',
+ 'after_bulk_delete'
]:
event.listen(sess, evt, listener(evt))
sess.flush()
eq_(
canary,
- [ 'on_after_attach', 'on_before_flush', 'on_after_begin',
- 'on_after_flush', 'on_before_commit', 'on_after_commit',
- 'on_after_flush_postexec', ]
+ [ 'after_attach', 'before_flush', 'after_begin',
+ 'after_flush', 'before_commit', 'after_commit',
+ 'after_flush_postexec', ]
)
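Any of the session events above can likewise be used standalone; a sketch of a ``before_flush`` listener that inspects pending state, assuming the (session, flush_context, instances) signature::

    from sqlalchemy import event
    from sqlalchemy.orm import Session

    def log_pending(session, flush_context, instances):
        # report what is about to be written before the flush proceeds
        for obj in session.new:
            print "pending insert:", obj

    event.listen(Session, 'before_flush', log_pending)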
@testing.resolve_artifact_names
u = User(name='u1')
sess.add(u)
sess.flush()
- eq_(canary, ['on_after_attach', 'on_before_flush', 'on_after_begin',
- 'on_after_flush', 'on_after_flush_postexec'])
+ eq_(canary, ['after_attach', 'before_flush', 'after_begin',
+ 'after_flush', 'after_flush_postexec'])
@testing.resolve_artifact_names
def test_flush_in_commit_hook(self):
u.name = 'ed'
sess.commit()
- eq_(canary, ['on_before_commit', 'on_before_flush', 'on_after_flush',
- 'on_after_flush_postexec', 'on_after_commit'])
+ eq_(canary, ['before_commit', 'before_flush', 'after_flush',
+ 'after_flush_postexec', 'after_commit'])
def test_standalone_on_commit_hook(self):
sess, canary = self._listener_fixture()
sess.commit()
- eq_(canary, ['on_before_commit', 'on_after_commit'])
+ eq_(canary, ['before_commit', 'after_commit'])
@testing.resolve_artifact_names
def test_on_bulk_update_hook(self):
sess, canary = self._listener_fixture()
mapper(User, users)
sess.query(User).update({'name': 'foo'})
- eq_(canary, ['on_after_begin', 'on_after_bulk_update'])
+ eq_(canary, ['after_begin', 'after_bulk_update'])
@testing.resolve_artifact_names
def test_on_bulk_delete_hook(self):
sess, canary = self._listener_fixture()
mapper(User, users)
sess.query(User).delete()
- eq_(canary, ['on_after_begin', 'on_after_bulk_delete'])
+ eq_(canary, ['after_begin', 'after_bulk_delete'])
def test_connection_emits_after_begin(self):
sess, canary = self._listener_fixture(bind=testing.db)
conn = sess.connection()
- eq_(canary, ['on_after_begin'])
+ eq_(canary, ['after_begin'])
@testing.resolve_artifact_names
def test_reentrant_flush(self):
session.flush()
sess = Session()
- event.listen(sess, 'on_before_flush', before_flush)
+ event.listen(sess, 'before_flush', before_flush)
sess.add(User(name='foo'))
assert_raises_message(sa.exc.InvalidRequestError,
'already flushing', sess.flush)
session.delete(x)
sess = Session()
- event.listen(sess, 'on_before_flush', before_flush)
+ event.listen(sess, 'before_flush', before_flush)
u = User(name='u1')
sess.add(u)
obj.name += " modified"
sess = Session(autoflush=True)
- event.listen(sess, 'on_before_flush', before_flush)
+ event.listen(sess, 'before_flush', before_flush)
u = User(name='u1')
sess.add(u)
mapper(User, users, batch=False)
evt = Events()
- event.listen(User, "on_before_insert", evt.before_insert)
- event.listen(User, "on_after_insert", evt.after_insert)
+ event.listen(User, "before_insert", evt.before_insert)
+ event.listen(User, "after_insert", evt.after_insert)
u1 = User(name='user1')
u2 = User(name='user2')
m = mapper(User, users)
evt = Events()
- event.listen(User, "on_before_insert", evt.before_insert)
- event.listen(User, "on_after_insert", evt.after_insert)
+ event.listen(User, "before_insert", evt.before_insert)
+ event.listen(User, "after_insert", evt.after_insert)
u1 = User(name='user1')
u2 = User(name='user2')