after a dispose() occurs.
- Engine gains an "execution_options" argument and
update_execution_options() method, which will apply to
all connections generated by this engine.
- Added more aggressive caching to the mapper's usage of
UPDATE, INSERT, and DELETE expressions. Assuming the
statement has no per-object SQL expressions attached,
the expression objects are cached by the mapper after
the first create, and their compiled form is stored
persistently in a cache dictionary for the duration of
the related Engine.
- change #3 required change #1, so that we could test a set
  of mappers operating over the course of many engines
  without an increase in memory usage (a usage sketch of the
  new engine-level options follows this list).
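
  A minimal usage sketch of the new engine-level options; the URL and
  the option values here are illustrative only::

      from sqlalchemy import create_engine

      engine = create_engine('sqlite://',
                             execution_options={'autocommit': True},
                             pool_logging_name='mypool')

      # every Connection produced by this Engine inherits the
      # engine-wide execution options
      conn = engine.contextual_connect()

      # options may also be changed afterwards, affecting all
      # subsequently acquired connections
      engine.update_execution_options(autocommit=False)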
full PK happened to be expired and then was asked
to refresh. [ticket:1797]
+ - Added more aggressive caching to the mapper's usage of
+ UPDATE, INSERT, and DELETE expressions. Assuming the
+ statement has no per-object SQL expressions attached,
+ the expression objects are cached by the mapper after
+ the first create, and their compiled form is stored
+ persistently in a cache dictionary for the duration of
+ the related Engine.
+
- sql
- expr.in_() now accepts a text() construct as the argument.
  Grouping parentheses are added automatically; usage is sketched below.
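
  A minimal sketch of the new call form; ``col`` stands in for any
  column expression::

      col.in_(text("select id from table"))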
- engines
- Fixed building the C extensions on Python 2.4. [ticket:1781]
+ - Pool classes will reuse the same "pool_logging_name" setting
+ after a dispose() occurs.
+
+ - Engine gains an "execution_options" argument and
+ update_execution_options() method, which will apply to
+ all connections generated by this engine.
+
- mysql
- func.sysdate() emits "SYSDATE()", i.e. with the ending
parenthesis, on MySQL. [ticket:1794]
translations, both by engine-wide unicode conversion as well as
the ``Unicode`` type object.
+    :param execution_options: Dictionary of execution options which will
+ be applied to all connections. See
+ :meth:`~sqlalchemy.engine.base.Connection.execution_options`
+
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
Provides execution support for string-based SQL statements as well
as ClauseElement, Compiled and DefaultGenerator objects. Provides
- a begin method to return Transaction objects.
+ a :meth:`begin` method to return Transaction objects.
The Connection object is **not** thread-safe.
single: thread safety; Connection
"""
- _execution_options = util.frozendict()
def __init__(self, engine, connection=None, close_with_result=False,
_branch=False, _execution_options=None):
self._echo = self.engine._should_log_info()
if _execution_options:
self._execution_options =\
- self._execution_options.union(_execution_options)
+ engine._execution_options.union(_execution_options)
+ else:
+ self._execution_options = engine._execution_options
def _branch(self):
"""Return a new Connection which references this Connection's
"""
+ _execution_options = util.frozendict()
+
def __init__(self, pool, dialect, url,
- logging_name=None, echo=None, proxy=None):
+ logging_name=None, echo=None, proxy=None,
+ execution_options=None
+ ):
self.pool = pool
self.url = url
self.dialect = dialect
self.Connection = _proxy_connection_cls(Connection, proxy)
else:
self.Connection = Connection
+ if execution_options:
+ self.update_execution_options(**execution_options)
+
+ def update_execution_options(self, **opt):
+        """Update the execution_options dictionary of this :class:`Engine`.
+
+        For details on execution_options, see
+        :meth:`Connection.execution_options` as well as
+        :meth:`sqlalchemy.sql.expression.Executable.execution_options`.
+
+        """
+ self._execution_options = \
+ self._execution_options.union(opt)
@property
def name(self):
self._connections = util.threading.local()
proxy = kwargs.get('proxy')
if proxy:
- self.TLConnection = base._proxy_connection_cls(TLConnection, proxy)
+ self.TLConnection = base._proxy_connection_cls(
+ TLConnection, proxy)
else:
self.TLConnection = TLConnection
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
-
+ self._memoized_values = {}
+
if allow_null_pks:
util.warn_deprecated('the allow_null_pks option to Mapper() is '
'deprecated. It is now allow_partial_pks=False|True, '
for mapper in self.iterate_to_root():
util.reset_memoized(mapper, '_equivalent_columns')
util.reset_memoized(mapper, '_sorted_tables')
+ util.reset_memoized(mapper, '_compiled_cache')
if self.order_by is False and not self.concrete and self.inherits.order_by is not False:
self.order_by = self.inherits.order_by
self.mapped_table._reset_exported()
mc = self.mapped_table.corresponding_column(c)
if mc is None:
- raise sa_exc.ArgumentError("When configuring property '%s' on %s, "
- "column '%s' is not represented in the mapper's table. "
- "Use the `column_property()` function to force this column "
- "to be mapped as a read-only attribute." % (key, self, c))
+ raise sa_exc.ArgumentError(
+ "When configuring property '%s' on %s, "
+ "column '%s' is not represented in the mapper's table. "
+ "Use the `column_property()` function to force this column "
+ "to be mapped as a read-only attribute." % (key, self, c))
mapped_column.append(mc)
prop = ColumnProperty(*mapped_column)
else:
- raise sa_exc.ArgumentError("WARNING: when configuring property '%s' on %s, column '%s' "
+ raise sa_exc.ArgumentError(
+ "WARNING: when configuring property '%s' on %s, column '%s' "
"conflicts with property '%r'. "
"To resolve this, map the column to the class under a different "
"name in the 'properties' dictionary. Or, to remove all awareness "
return
if leftcol.table not in tables:
- leftval = self._get_committed_state_attr_by_column(state, state.dict, leftcol, passive=True)
+ leftval = self._get_committed_state_attr_by_column(
+ state, state.dict,
+ leftcol, passive=True)
if leftval is attributes.PASSIVE_NO_RESULT:
raise ColumnsNotAvailable()
binary.left = sql.bindparam(None, leftval, type_=binary.right.type)
elif rightcol.table not in tables:
- rightval = self._get_committed_state_attr_by_column(state, state.dict, rightcol, passive=True)
+ rightval = self._get_committed_state_attr_by_column(
+ state, state.dict,
+ rightcol, passive=True)
if rightval is attributes.PASSIVE_NO_RESULT:
raise ColumnsNotAvailable()
binary.right = sql.bindparam(None, rightval, type_=binary.right.type)
if mapper.local_table in tables:
start = True
if start and not mapper.single:
- allconds.append(visitors.cloned_traverse(mapper.inherit_condition, {}, {'binary':visit_binary}))
+ allconds.append(visitors.cloned_traverse(
+ mapper.inherit_condition,
+ {},
+ {'binary':visit_binary}
+ )
+ )
except ColumnsNotAvailable:
return None
except StopIteration:
visitables.pop()
+ @util.memoized_property
+ def _compiled_cache(self):
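+        # a WeakKeyDictionary, keyed per-Engine at flush time; the weak
+        # keying lets a disposed Engine and its cached compiled
+        # statements be garbage collected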
+ return weakref.WeakKeyDictionary()
+
@util.memoized_property
def _sorted_tables(self):
table_to_mapper = {}
uow.dependencies.add((action, delete_all))
yield action
-
+
+ def _memo(self, key, callable_):
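+        # return the value cached under ``key``, generating it via
+        # ``callable_()`` on first access; used to cache the INSERT,
+        # UPDATE and DELETE constructs per table for the life of
+        # this mapper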
+ if key in self._memoized_values:
+ return self._memoized_values[key]
+ else:
+ self._memoized_values[key] = value = callable_()
+ return value
+
def _save_obj(self, states, uowtransaction, postupdate=False,
post_update_cols=None, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects.
connection_callable = None
tups = []
+
for state in _sort_states(states):
- conn = connection_callable and \
- connection_callable(self, state.obj()) or \
- connection
+ if connection_callable:
+ conn = connection_callable(self, state.obj())
+ else:
+ conn = connection
has_identity = state.has_identity
mapper = _state_mapper(state)
table_to_mapper = self._sorted_tables
+ compiled_cache = self._compiled_cache
+
for table in table_to_mapper:
insert = []
update = []
else:
hasdata = True
elif col in pks:
- params[col._label] = mapper._get_state_attr_by_column(state, state_dict, col)
+ params[col._label] = mapper._get_state_attr_by_column(
+ state, state_dict, col)
if hasdata:
update.append((state, state_dict, params, mapper,
connection, value_params))
if update:
- mapper = table_to_mapper[table]
- clause = sql.and_()
- for col in mapper._pks_by_table[table]:
- clause.clauses.append(
- col ==
- sql.bindparam(col._label, type_=col.type)
- )
+ mapper = table_to_mapper[table]
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
- if needs_version_id:
- clause.clauses.append(mapper.version_id_col ==\
- sql.bindparam(mapper.version_id_col._label, type_=col.type))
-
- statement = table.update(clause)
+ def update_stmt():
+ clause = sql.and_()
+
+ for col in mapper._pks_by_table[table]:
+ clause.clauses.append(
+ col == sql.bindparam(col._label,
+ type_=col.type)
+ )
- if len(update) > 1:
- compiled_cache = {}
- else:
- compiled_cache = None
+ if needs_version_id:
+                    clause.clauses.append(
+                        mapper.version_id_col == sql.bindparam(
+                            mapper.version_id_col._label,
+                            type_=mapper.version_id_col.type))
+ return table.update(clause)
+
+ statement = self._memo(('update', table), update_stmt)
+
rows = 0
- for state, state_dict, params, mapper, connection, value_params in update:
- if not value_params and compiled_cache is not None:
- c = connection.\
- execution_options(
- compiled_cache=compiled_cache).\
- execute(statement, params)
- else:
+ for state, state_dict, params, mapper, \
+ connection, value_params in update:
+
+ if value_params:
c = connection.execute(statement.values(value_params), params)
+ else:
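+                        # execute via the per-Engine compiled statement
+                        # cache, skipping recompilation of the UPDATE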
+ c = connection.\
+ execution_options(compiled_cache=\
+ compiled_cache.setdefault(
+ connection.engine, {})
+ ).execute(statement, params)
mapper._postfetch(uowtransaction, table,
state, state_dict, c,
stacklevel=12)
if insert:
- statement = table.insert()
- if len(insert) > 1:
- compiled_cache = {}
- else:
- compiled_cache = None
-
- for state, state_dict, params, mapper, connection, value_params in insert:
- if not value_params and compiled_cache is not None:
- c = connection.\
- execution_options(
- compiled_cache=compiled_cache).\
- execute(statement, params)
- else:
+ statement = self._memo(('insert', table), table.insert)
+
+ for state, state_dict, params, mapper, \
+ connection, value_params in insert:
+
+ if value_params:
c = connection.execute(statement.values(value_params), params)
+ else:
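+                        # same per-Engine compiled cache as the UPDATE path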
+ c = connection.\
+ execution_options(compiled_cache=\
+ compiled_cache.setdefault(
+ connection.engine, {})
+ ).execute(statement, params)
+
primary_key = c.inserted_primary_key
if primary_key is not None:
tups = []
for state in _sort_states(states):
mapper = _state_mapper(state)
+
+ if connection_callable:
+ conn = connection_callable(self, state.obj())
+ else:
+ conn = connection
- conn = connection_callable and \
- connection_callable(self, state.obj()) or \
- connection
-
if 'before_delete' in mapper.extension:
mapper.extension.before_delete(mapper, conn, state.obj())
conn))
table_to_mapper = self._sorted_tables
-
+
+ compiled_cache = self._compiled_cache
+
for table in reversed(table_to_mapper.keys()):
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection in tups:
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
- mapper._get_state_attr_by_column(state, state_dict, mapper.version_id_col)
+ mapper._get_state_attr_by_column(state, state_dict,
+ mapper.version_id_col)
- for connection, del_objects in delete.iteritems():
- mapper = table_to_mapper[table]
+ mapper = table_to_mapper[table]
+ need_version_id = mapper.version_id_col is not None and \
+ table.c.contains_column(mapper.version_id_col)
+
+ def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col.key, type_=col.type))
- need_version_id = mapper.version_id_col is not None and \
- table.c.contains_column(mapper.version_id_col)
-
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
)
)
- statement = table.delete(clause)
+ return table.delete(clause)
+
+ for connection, del_objects in delete.iteritems():
+ statement = self._memo(('delete', table), delete_stmt)
rows = -1
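+                # rebind the connection to the per-Engine compiled
+                # statement cache before emitting the DELETE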
+ connection = connection.execution_options(
+ compiled_cache=compiled_cache.setdefault(
+ connection.engine,
+ {}))
+
if need_version_id and \
not connection.dialect.supports_sane_multi_rowcount:
# TODO: need test coverage for this [ticket:1761]
method is used.
"""
- _execution_options = self._execution_options.copy()
- for key, value in kwargs.items():
- _execution_options[key] = value
- self._execution_options = _execution_options
+ self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
Options:
- autocommit
- Defaults to ``False``. When ``True``, the ``Session`` does not keep a
- persistent transaction running, and will acquire connections from the
- engine on an as-needed basis, returning them immediately after their
- use. Flushes will begin and commit (or possibly rollback) their own
- transaction if no transaction is present. When using this mode, the
- `session.begin()` method may be used to begin a transaction explicitly.
+ :param autocommit: Defaults to ``False``. When ``True``, the ``Session``
+ does not keep a persistent transaction running, and will acquire
+ connections from the engine on an as-needed basis, returning them
+ immediately after their use. Flushes will begin and commit (or possibly
+ rollback) their own transaction if no transaction is present. When using
+ this mode, the `session.begin()` method may be used to begin a
+ transaction explicitly.
Leaving it on its default value of ``False`` means that the ``Session``
will acquire a connection and begin a transaction the first time it is
by any of these methods, the ``Session`` is ready for the next usage,
which will again acquire and maintain a new connection/transaction.
- autoflush
- When ``True``, all query operations will issue a ``flush()`` call to
- this ``Session`` before proceeding. This is a convenience feature so
- that ``flush()`` need not be called repeatedly in order for database
- queries to retrieve results. It's typical that ``autoflush`` is used in
- conjunction with ``autocommit=False``. In this scenario, explicit calls
- to ``flush()`` are rarely needed; you usually only need to call
- ``commit()`` (which flushes) to finalize changes.
-
- bind
- An optional ``Engine`` or ``Connection`` to which this ``Session``
- should be bound. When specified, all SQL operations performed by this
- session will execute via this connectable.
-
- binds
- An optional dictionary, which contains more granular "bind" information
- than the ``bind`` parameter provides. This dictionary can map individual
- ``Table`` instances as well as ``Mapper`` instances to individual
- ``Engine`` or ``Connection`` objects. Operations which proceed relative
- to a particular ``Mapper`` will consult this dictionary for the direct
- ``Mapper`` instance as well as the mapper's ``mapped_table`` attribute
- in order to locate an connectable to use. The full resolution is
- described in the ``get_bind()`` method of ``Session``. Usage looks
- like::
+ :param autoflush: When ``True``, all query operations will issue a
+ ``flush()`` call to this ``Session`` before proceeding. This is a
+ convenience feature so that ``flush()`` need not be called repeatedly
+ in order for database queries to retrieve results. It's typical that
+ ``autoflush`` is used in conjunction with ``autocommit=False``. In this
+ scenario, explicit calls to ``flush()`` are rarely needed; you usually
+ only need to call ``commit()`` (which flushes) to finalize changes.
+
+ :param bind: An optional ``Engine`` or ``Connection`` to which this
+ ``Session`` should be bound. When specified, all SQL operations
+ performed by this session will execute via this connectable.
+
+ :param binds: An optional dictionary which contains more granular "bind"
+ information than the ``bind`` parameter provides. This dictionary can
+ map individual ``Table`` instances as well as ``Mapper`` instances to
+ individual ``Engine`` or ``Connection`` objects. Operations which
+ proceed relative to a particular ``Mapper`` will consult this
+ dictionary for the direct ``Mapper`` instance as well as the mapper's
+      ``mapped_table`` attribute in order to locate a connectable to use.
+ The full resolution is described in the ``get_bind()`` method of
+ ``Session``. Usage looks like::
sess = Session(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
Also see the ``bind_mapper()`` and ``bind_table()`` methods.
- \class_
- Specify an alternate class other than ``sqlalchemy.orm.session.Session``
- which should be used by the returned class. This is the only argument
- that is local to the ``sessionmaker()`` function, and is not sent
- directly to the constructor for ``Session``.
-
- _enable_transaction_accounting
- Defaults to ``True``. A legacy-only flag which when ``False``
- disables *all* 0.5-style object accounting on transaction boundaries,
- including auto-expiry of instances on rollback and commit, maintenance of
- the "new" and "deleted" lists upon rollback, and autoflush
- of pending changes upon begin(), all of which are interdependent.
-
- expire_on_commit
- Defaults to ``True``. When ``True``, all instances will be fully expired after
- each ``commit()``, so that all attribute/object access subsequent to a completed
- transaction will load from the most recent database state.
-
- extension
- An optional :class:`~sqlalchemy.orm.session.SessionExtension` instance, or
- a list of such instances, which
- will receive pre- and post- commit and flush events, as well as a
- post-rollback event. User- defined code may be placed within these
- hooks using a user-defined subclass of ``SessionExtension``.
-
- query_cls
- Class which should be used to create new Query objects, as returned
- by the ``query()`` method. Defaults to :class:`~sqlalchemy.orm.query.Query`.
-
- twophase
- When ``True``, all transactions will be started using
- :mod:`~sqlalchemy.engine_TwoPhaseTransaction`. During a ``commit()``, after
- ``flush()`` has been issued for all attached databases, the
- ``prepare()`` method on each database's ``TwoPhaseTransaction`` will be
- called. This allows each database to roll back the entire transaction,
- before each transaction is committed.
-
- weak_identity_map
- When set to the default value of ``True``, a weak-referencing map is
- used; instances which are not externally referenced will be garbage
- collected immediately. For dereferenced instances which have pending
- changes present, the attribute management system will create a temporary
- strong-reference to the object which lasts until the changes are flushed
- to the database, at which point it's again dereferenced. Alternatively,
- when using the value ``False``, the identity map uses a regular Python
- dictionary to store instances. The session will maintain all instances
- present until they are removed using expunge(), clear(), or purge().
+ :param \class_: Specify an alternate class other than
+ ``sqlalchemy.orm.session.Session`` which should be used by the returned
+ class. This is the only argument that is local to the
+ ``sessionmaker()`` function, and is not sent directly to the
+ constructor for ``Session``.
+
+ :param _enable_transaction_accounting: Defaults to ``True``. A
+ legacy-only flag which when ``False`` disables *all* 0.5-style object
+ accounting on transaction boundaries, including auto-expiry of
+ instances on rollback and commit, maintenance of the "new" and
+ "deleted" lists upon rollback, and autoflush of pending changes upon
+ begin(), all of which are interdependent.
+
+ :param expire_on_commit: Defaults to ``True``. When ``True``, all
+ instances will be fully expired after each ``commit()``, so that all
+ attribute/object access subsequent to a completed transaction will load
+ from the most recent database state.
+
+ :param extension: An optional
+ :class:`~sqlalchemy.orm.session.SessionExtension` instance, or a list
+ of such instances, which will receive pre- and post- commit and flush
+      events, as well as a post-rollback event. User-defined code may be
+ placed within these hooks using a user-defined subclass of
+ ``SessionExtension``.
+
+ :param query_cls: Class which should be used to create new Query objects,
+ as returned by the ``query()`` method. Defaults to
+ :class:`~sqlalchemy.orm.query.Query`.
+
+ :param twophase: When ``True``, all transactions will be started using
+      :class:`~sqlalchemy.engine.base.TwoPhaseTransaction`. During a
+      ``commit()``, after ``flush()`` has been issued for all attached
+      databases, the ``prepare()`` method on each database's
+      ``TwoPhaseTransaction`` will be called. This allows each database
+      to roll back the entire transaction before any transaction is
+      committed.
+
+ :param weak_identity_map: When set to the default value of ``True``, a
+ weak-referencing map is used; instances which are not externally
+ referenced will be garbage collected immediately. For dereferenced
+ instances which have pending changes present, the attribute management
+ system will create a temporary strong-reference to the object which
+ lasts until the changes are flushed to the database, at which point
+ it's again dereferenced. Alternatively, when using the value ``False``,
+ the identity map uses a regular Python dictionary to store instances.
+ The session will maintain all instances present until they are removed
+ using expunge(), clear(), or purge().
"""
kwargs['bind'] = bind
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
- 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', 'is_modified',
+ 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
+ 'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
-
+
+
def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False, twophase=False,
- weak_identity_map=True, binds=None, extension=None, query_cls=query.Query):
+ weak_identity_map=True, binds=None, extension=None,
+ query_cls=query.Query):
"""Construct a new Session.
Arguments to ``Session`` are described using the
self.extensions = util.to_list(extension) or []
self._query_cls = query_cls
self._mapper_flush_opts = {}
-
if binds is not None:
for mapperortable, bind in binds.iteritems():
if isinstance(mapperortable, (type, Mapper)):
"""
if logging_name:
- self.logging_name = logging_name
+ self.logging_name = self._orig_logging_name = logging_name
+ else:
+ self._orig_logging_name = None
+
self.logger = log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
+ logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
listeners=self.listeners)
return QueuePool(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow, timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
+ logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal, listeners=self.listeners)
def do_return_conn(self, conn):
return NullPool(self._creator,
recycle=self._recycle,
echo=self.echo,
+ logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
listeners=self.listeners)
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
+ logging_name=self._orig_logging_name,
listeners=self.listeners)
def create_connection(self):
def recreate(self):
self.logger.info("Pool recreating")
return AssertionPool(self._creator, echo=self.echo,
+ logging_name=self._orig_logging_name,
listeners=self.listeners)
def do_get(self):
from sqlalchemy.test.testing import eq_
-from sqlalchemy.orm import mapper, relationship, create_session, clear_mappers, sessionmaker
+from sqlalchemy.orm import mapper, relationship, create_session, clear_mappers, \
+ sessionmaker, class_mapper
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.orm.session import _sessions
from sqlalchemy.util import jython
import operator
-from sqlalchemy.test import testing
-from sqlalchemy import MetaData, Integer, String, ForeignKey, PickleType
+from sqlalchemy.test import testing, engines
+from sqlalchemy import MetaData, Integer, String, ForeignKey, PickleType, create_engine
from sqlalchemy.test.schema import Table, Column
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.test.util import gc_collect
import gc
+import weakref
from test.orm import _base
if jython:
def profile_memory(func):
# run the test 50 times. if length of gc.get_objects()
# keeps growing, assert false
+
def profile(*args):
gc_collect()
samples = [0 for x in range(0, 50)]
func(*args)
gc_collect()
samples[x] = len(gc.get_objects())
+
print "sample gc sizes:", samples
assert len(_sessions) == 0
del m1, m2, m3
assert_no_mappers()
+ @testing.crashes('sqlite', ':memory: connection not suitable here')
+ def test_orm_many_engines(self):
+ metadata = MetaData(testing.db)
+
+ table1 = Table("mytable", metadata,
+ Column('col1', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('col2', String(30)))
+
+ table2 = Table("mytable2", metadata,
+ Column('col1', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('col2', String(30)),
+ Column('col3', Integer, ForeignKey("mytable.col1")))
+
+ metadata.create_all()
+
+        # fixture classes, assumed to follow this module's
+        # _base.ComparableEntity convention for mapped test classes
+        class A(_base.ComparableEntity):
+            pass
+
+        class B(_base.ComparableEntity):
+            pass
+
+ m1 = mapper(A, table1, properties={
+ "bs":relationship(B, cascade="all, delete", order_by=table2.c.col1)},
+ order_by=table1.c.col1)
+ m2 = mapper(B, table2)
+
+ m3 = mapper(A, table1, non_primary=True)
+
+ @profile_memory
+ def go():
+ engine = engines.testing_engine(options={'logging_name':'FOO', 'pool_logging_name':'BAR'})
+ sess = create_session(bind=engine)
+
+ a1 = A(col2="a1")
+ a2 = A(col2="a2")
+ a3 = A(col2="a3")
+ a1.bs.append(B(col2="b1"))
+ a1.bs.append(B(col2="b2"))
+ a3.bs.append(B(col2="b3"))
+ for x in [a1,a2,a3]:
+ sess.add(x)
+ sess.flush()
+ sess.expunge_all()
+
+ alist = sess.query(A).all()
+ eq_(
+ [
+ A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
+ A(col2="a2", bs=[]),
+ A(col2="a3", bs=[B(col2="b3")])
+ ],
+ alist)
+
+ for a in alist:
+ sess.delete(a)
+ sess.flush()
+ sess.close()
+ engine.dispose()
+ go()
+
+ metadata.drop_all()
+ del m1, m2, m3
+ assert_no_mappers()
+
def test_mapper_reset(self):
metadata = MetaData(testing.db)
def test_profile_1a_populate(self):
self.test_baseline_1a_populate()
- @profiling.function_call_count(848)
+ @profiling.function_call_count(640)
def test_profile_2_insert(self):
self.test_baseline_2_insert()
eq_(testing.db.execute(users_autoinc.select()).fetchall(), [
(1, None)
])
+
+ def test_engine_level_options(self):
+ eng = engines.testing_engine(options={
+ 'execution_options':{'foo':'bar'}
+ })
+ conn = eng.contextual_connect()
+ eq_(conn._execution_options['foo'], 'bar')
+ eq_(conn.execution_options(bat='hoho')._execution_options['foo'], 'bar')
+ eq_(conn.execution_options(bat='hoho')._execution_options['bat'], 'hoho')
+ eq_(conn.execution_options(foo='hoho')._execution_options['foo'], 'hoho')
+
+ eng.update_execution_options(foo='hoho')
+ conn = eng.contextual_connect()
+ eq_(conn._execution_options['foo'], 'hoho')
+
class CompiledCacheTest(TestBase):
@classmethod
}
eng = engines.testing_engine(options=options)
self._test_logger(eng, "myenginename", "mypoolname")
+
+ eng.dispose()
+ self._test_logger(eng, "myenginename", "mypoolname")
+
def test_unnamed_logger(self):
eng = engines.testing_engine(options={'echo':'debug', 'echo_pool':'debug'})