author    Mike Bayer <mike_mp@zzzcomputing.com>
          Mon, 31 May 2010 00:24:08 +0000 (20:24 -0400)
committer Mike Bayer <mike_mp@zzzcomputing.com>
          Mon, 31 May 2010 00:24:08 +0000 (20:24 -0400)

- Pool classes will reuse the same "pool_logging_name" setting
  after a dispose() occurs.
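
  A minimal sketch of the fixed behavior (an illustration, not part of
  the commit; assumes any reachable database URL)::

      from sqlalchemy import create_engine

      engine = create_engine('sqlite://', pool_logging_name='mypool')
      engine.connect().close()   # pool logs under the name "mypool"
      engine.dispose()           # pool is recreated internally
      # with this fix, the recreated pool keeps logging as "mypool"
      engine.connect().close()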

- Engine gains an "execution_options" argument and
  update_execution_options() method, which will apply to
  all connections generated by this engine.
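
  Usage sketch, mirroring the new test in test/engine/test_execute.py
  (the 'foo'/'bat' option keys are arbitrary placeholders)::

      from sqlalchemy import create_engine

      eng = create_engine('sqlite://', execution_options={'foo': 'bar'})

      conn = eng.contextual_connect()
      assert conn._execution_options['foo'] == 'bar'

      # per-connection options layer on top of the engine-wide defaults
      assert conn.execution_options(bat='hoho')._execution_options['foo'] == 'bar'

      eng.update_execution_options(foo='hoho')
      conn = eng.contextual_connect()
      assert conn._execution_options['foo'] == 'hoho'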

- Added more aggressive caching to the mapper's usage of
  UPDATE, INSERT, and DELETE expressions.  Assuming the
  statement has no per-object SQL expressions attached,
  the expression objects are cached by the mapper after
  they are first created, and their compiled form is stored
  persistently in a cache dictionary for the lifespan of
  the related Engine.
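
  The caching pattern, reduced to a sketch (names mirror the private
  helpers added in lib/sqlalchemy/orm/mapper.py below; not public API)::

      import weakref

      class _MapperSketch(object):
          def __init__(self):
              # per-mapper statement memoization, keyed by (action, table)
              self._memoized_values = {}
              # compiled-form caches, released when their Engine is collected
              self._compiled_cache = weakref.WeakKeyDictionary()

          def _memo(self, key, callable_):
              if key in self._memoized_values:
                  return self._memoized_values[key]
              value = self._memoized_values[key] = callable_()
              return value

      # usage, as in Mapper._save_obj():
      #   statement = self._memo(('update', table), update_stmt)
      #   cache = self._compiled_cache.setdefault(connection.engine, {})
      #   connection.execution_options(compiled_cache=cache).execute(statement, params)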

- Change #3 required change #1 so that we could test
  a set of mappers operating across many engines without
  an increase in memory usage.

CHANGES
lib/sqlalchemy/engine/__init__.py
lib/sqlalchemy/engine/base.py
lib/sqlalchemy/engine/threadlocal.py
lib/sqlalchemy/orm/mapper.py
lib/sqlalchemy/orm/query.py
lib/sqlalchemy/orm/session.py
lib/sqlalchemy/pool.py
test/aaa_profiling/test_memusage.py
test/aaa_profiling/test_zoomark_orm.py
test/engine/test_execute.py

diff --git a/CHANGES b/CHANGES
index 18691e0376b29fd9cfd8e713d2407bc9de62c537..ff379d197320e76033f501314a420fc4ac0e4967 100644
--- a/CHANGES
+++ b/CHANGES
@@ -24,6 +24,14 @@ CHANGES
     full PK happened to be expired and then was asked
     to refresh. [ticket:1797]
 
+  - Added more aggressive caching to the mapper's usage of
+    UPDATE, INSERT, and DELETE expressions.  Assuming the 
+    statement has no per-object SQL expressions attached,
+    the expression objects are cached by the mapper after 
+    the first create, and their compiled form is stored
+    persistently in a cache dictionary for the duration of
+    the related Engine.
+    
 - sql
   - expr.in_() now accepts a text() construct as the argument.
     Grouping parenthesis are added automatically, i.e. usage
@@ -66,6 +74,13 @@ CHANGES
 - engines
   - Fixed building the C extensions on Python 2.4. [ticket:1781]
 
+  - Pool classes will reuse the same "pool_logging_name" setting
+    after a dispose() occurs.
+    
+  - Engine gains an "execution_options" argument and 
+    update_execution_options() method, which will apply to 
+    all connections generated by this engine.
+    
 - mysql
   - func.sysdate() emits "SYSDATE()", i.e. with the ending
     parenthesis, on MySQL.  [ticket:1794]
diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py
index 9b3dbedd8b1e90befdb3784d9255ea476b6e20fc..18b25fbaa3f81d72972e29cf591df33e98c54885 100644
--- a/lib/sqlalchemy/engine/__init__.py
+++ b/lib/sqlalchemy/engine/__init__.py
@@ -164,6 +164,10 @@ def create_engine(*args, **kwargs):
         translations, both by engine-wide unicode conversion as well as
         the ``Unicode`` type object.
 
+    :param execution_options: Dictionary execution options which will
+        be applied to all connections.  See
+        :meth:`~sqlalchemy.engine.base.Connection.execution_options`
+        
     :param label_length=None: optional integer value which limits
         the size of dynamically generated column labels to that many
         characters. If less than 6, labels are generated as
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index d39880cbfe3d9f91fbb0bb64e693815b5d487a5c..a02cb81a07d36912bec6558b99f5743bf43617b3 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -799,7 +799,7 @@ class Connection(Connectable):
 
     Provides execution support for string-based SQL statements as well
     as ClauseElement, Compiled and DefaultGenerator objects.  Provides
-    a begin method to return Transaction objects.
+    a :meth:`begin` method to return Transaction objects.
 
     The Connection object is **not** thread-safe.
 
@@ -807,7 +807,6 @@ class Connection(Connectable):
       single: thread safety; Connection
       
     """
-    _execution_options = util.frozendict()
     
     def __init__(self, engine, connection=None, close_with_result=False,
                  _branch=False, _execution_options=None):
@@ -828,7 +827,9 @@ class Connection(Connectable):
         self._echo = self.engine._should_log_info()
         if _execution_options:
             self._execution_options =\
-                self._execution_options.union(_execution_options)
+                engine._execution_options.union(_execution_options)
+        else:
+            self._execution_options = engine._execution_options
 
     def _branch(self):
         """Return a new Connection which references this Connection's
@@ -1557,8 +1558,12 @@ class Engine(Connectable, log.Identified):
 
     """
 
+    _execution_options = util.frozendict()
+
     def __init__(self, pool, dialect, url, 
-                        logging_name=None, echo=None, proxy=None):
+                        logging_name=None, echo=None, proxy=None,
+                        execution_options=None
+                        ):
         self.pool = pool
         self.url = url
         self.dialect = dialect
@@ -1571,6 +1576,20 @@ class Engine(Connectable, log.Identified):
             self.Connection = _proxy_connection_cls(Connection, proxy)
         else:
             self.Connection = Connection
+        if execution_options:
+            self.update_execution_options(**execution_options)
+    
+    def update_execution_options(self, **opt):
+        """update the execution_options dictionary of this :class:`Engine`.
+        
+        For details on execution_options, see
+        :meth:`Connection.execution_options` as well as
+        :meth:`sqlalchemy.sql.expression.Executable.execution_options`.
+        
+        
+        """
+        self._execution_options = \
+                self._execution_options.union(opt)
 
     @property
     def name(self):
diff --git a/lib/sqlalchemy/engine/threadlocal.py b/lib/sqlalchemy/engine/threadlocal.py
index 001caee2a74ea56f7dc6e95fd0d40d670d47c455..ec2b4f302e2915c8b6742f5e8b88730e24d9f205 100644
--- a/lib/sqlalchemy/engine/threadlocal.py
+++ b/lib/sqlalchemy/engine/threadlocal.py
@@ -37,7 +37,8 @@ class TLEngine(base.Engine):
         self._connections = util.threading.local()
         proxy = kwargs.get('proxy')
         if proxy:
-            self.TLConnection = base._proxy_connection_cls(TLConnection, proxy)
+            self.TLConnection = base._proxy_connection_cls(
+                                        TLConnection, proxy)
         else:
             self.TLConnection = TLConnection
 
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index a0f234057c58977889a66de27bd72e8c3309b965..ffccfabf9cf822b585b3a7d667cfd7f4ca46cc0e 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -139,7 +139,8 @@ class Mapper(object):
         self._clause_adapter = None
         self._requires_row_aliasing = False
         self._inherits_equated_pairs = None
-
+        self._memoized_values = {}
+        
         if allow_null_pks:
             util.warn_deprecated('the allow_null_pks option to Mapper() is '
                                 'deprecated.  It is now allow_partial_pks=False|True, '
@@ -259,6 +260,7 @@ class Mapper(object):
             for mapper in self.iterate_to_root():
                 util.reset_memoized(mapper, '_equivalent_columns')
                 util.reset_memoized(mapper, '_sorted_tables')
+                util.reset_memoized(mapper, '_compiled_cache')
                 
             if self.order_by is False and not self.concrete and self.inherits.order_by is not False:
                 self.order_by = self.inherits.order_by
@@ -560,14 +562,16 @@ class Mapper(object):
                             self.mapped_table._reset_exported()
                         mc = self.mapped_table.corresponding_column(c)
                         if mc is None:
-                            raise sa_exc.ArgumentError("When configuring property '%s' on %s, "
-                                "column '%s' is not represented in the mapper's table.  "
-                                "Use the `column_property()` function to force this column "
-                                "to be mapped as a read-only attribute." % (key, self, c))
+                            raise sa_exc.ArgumentError(
+                            "When configuring property '%s' on %s, "
+                            "column '%s' is not represented in the mapper's table.  "
+                            "Use the `column_property()` function to force this column "
+                            "to be mapped as a read-only attribute." % (key, self, c))
                     mapped_column.append(mc)
                 prop = ColumnProperty(*mapped_column)
             else:
-                raise sa_exc.ArgumentError("WARNING: when configuring property '%s' on %s, column '%s' "
+                raise sa_exc.ArgumentError(
+                    "WARNING: when configuring property '%s' on %s, column '%s' "
                     "conflicts with property '%r'.  "
                     "To resolve this, map the column to the class under a different "
                     "name in the 'properties' dictionary.  Or, to remove all awareness "
@@ -1186,12 +1190,16 @@ class Mapper(object):
                 return
 
             if leftcol.table not in tables:
-                leftval = self._get_committed_state_attr_by_column(state, state.dict, leftcol, passive=True)
+                leftval = self._get_committed_state_attr_by_column(
+                                                    state, state.dict, 
+                                                    leftcol, passive=True)
                 if leftval is attributes.PASSIVE_NO_RESULT:
                     raise ColumnsNotAvailable()
                 binary.left = sql.bindparam(None, leftval, type_=binary.right.type)
             elif rightcol.table not in tables:
-                rightval = self._get_committed_state_attr_by_column(state, state.dict, rightcol, passive=True)
+                rightval = self._get_committed_state_attr_by_column(
+                                                    state, state.dict, 
+                                                    rightcol, passive=True)
                 if rightval is attributes.PASSIVE_NO_RESULT:
                     raise ColumnsNotAvailable()
                 binary.right = sql.bindparam(None, rightval, type_=binary.right.type)
@@ -1204,7 +1212,12 @@ class Mapper(object):
                 if mapper.local_table in tables:
                     start = True
                 if start and not mapper.single:
-                    allconds.append(visitors.cloned_traverse(mapper.inherit_condition, {}, {'binary':visit_binary}))
+                    allconds.append(visitors.cloned_traverse(
+                                                mapper.inherit_condition, 
+                                                {}, 
+                                                {'binary':visit_binary}
+                                        )
+                                    )
         except ColumnsNotAvailable:
             return None
 
@@ -1249,6 +1262,10 @@ class Mapper(object):
             except StopIteration:
                 visitables.pop()
 
+    @util.memoized_property
+    def _compiled_cache(self):
+        return weakref.WeakKeyDictionary()
+
     @util.memoized_property
     def _sorted_tables(self):
         table_to_mapper = {}
@@ -1289,7 +1306,14 @@ class Mapper(object):
                 uow.dependencies.add((action, delete_all))
             
             yield action
-        
+    
+    def _memo(self, key, callable_):
+        if key in self._memoized_values:
+            return self._memoized_values[key]
+        else:
+            self._memoized_values[key] = value = callable_()
+            return value
+    
     def _save_obj(self, states, uowtransaction, postupdate=False, 
                                 post_update_cols=None, single=False):
         """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects.
@@ -1326,10 +1350,12 @@ class Mapper(object):
             connection_callable = None
 
         tups = []
+        
         for state in _sort_states(states):
-            conn = connection_callable and \
-                connection_callable(self, state.obj()) or \
-                connection
+            if connection_callable:
+                conn = connection_callable(self, state.obj())
+            else:
+                conn = connection
 
             has_identity = state.has_identity
             mapper = _state_mapper(state)
@@ -1381,6 +1407,8 @@ class Mapper(object):
 
         table_to_mapper = self._sorted_tables
 
+        compiled_cache = self._compiled_cache
+
         for table in table_to_mapper:
             insert = []
             update = []
@@ -1489,45 +1517,50 @@ class Mapper(object):
                                 else:
                                     hasdata = True
                             elif col in pks:
-                                params[col._label] = mapper._get_state_attr_by_column(state, state_dict, col)
+                                params[col._label] = mapper._get_state_attr_by_column(
+                                                                    state, state_dict, col)
                     if hasdata:
                         update.append((state, state_dict, params, mapper, 
                                         connection, value_params))
 
             
             if update:
-                mapper = table_to_mapper[table]
-                clause = sql.and_()
                 
-                for col in mapper._pks_by_table[table]:
-                    clause.clauses.append(
-                                        col == 
-                                        sql.bindparam(col._label, type_=col.type)
-                                    )
+                mapper = table_to_mapper[table]
 
                 needs_version_id = mapper.version_id_col is not None and \
                             table.c.contains_column(mapper.version_id_col)
 
-                if needs_version_id:
-                    clause.clauses.append(mapper.version_id_col ==\
-                            sql.bindparam(mapper.version_id_col._label, type_=col.type))
-
-                statement = table.update(clause)
+                def update_stmt():
+                    clause = sql.and_()
+                
+                    for col in mapper._pks_by_table[table]:
+                        clause.clauses.append(
+                                            col ==  sql.bindparam(col._label,
+                                                        type_=col.type)
+                                        )
 
-                if len(update) > 1:
-                    compiled_cache = {}
-                else:
-                    compiled_cache = None
+                    if needs_version_id:
+                        clause.clauses.append(mapper.version_id_col ==\
+                                sql.bindparam(mapper.version_id_col._label,
+                                                type_=col.type))
 
+                    return table.update(clause)
+                
+                statement = self._memo(('update', table), update_stmt)
+                
                 rows = 0
-                for state, state_dict, params, mapper, connection, value_params in update:
-                    if not value_params and compiled_cache is not None:
-                        c = connection.\
-                                execution_options(
-                                        compiled_cache=compiled_cache).\
-                                        execute(statement, params)
-                    else:
+                for state, state_dict, params, mapper, \
+                            connection, value_params in update:
+                    
+                    if value_params:
                         c = connection.execute(statement.values(value_params), params)
+                    else:
+                        c = connection.\
+                                execution_options(compiled_cache=\
+                                            compiled_cache.setdefault(
+                                                connection.engine, {})
+                                        ).execute(statement, params)
                         
                     mapper._postfetch(uowtransaction, table, 
                                         state, state_dict, c, 
@@ -1549,20 +1582,20 @@ class Mapper(object):
                             stacklevel=12)
                     
             if insert:
-                statement = table.insert()
-                if len(insert) > 1:
-                    compiled_cache = {}
-                else:
-                    compiled_cache = None
-                    
-                for state, state_dict, params, mapper, connection, value_params in insert:
-                    if not value_params and compiled_cache is not None:
-                        c = connection.\
-                                execution_options(
-                                        compiled_cache=compiled_cache).\
-                                        execute(statement, params)
-                    else:
+                statement = self._memo(('insert', table), table.insert)
+
+                for state, state_dict, params, mapper, \
+                            connection, value_params in insert:
+
+                    if value_params:
                         c = connection.execute(statement.values(value_params), params)
+                    else:
+                        c = connection.\
+                                execution_options(compiled_cache=\
+                                            compiled_cache.setdefault(
+                                                    connection.engine, {})
+                                        ).execute(statement, params)
+                    
                     primary_key = c.inserted_primary_key
 
                     if primary_key is not None:
@@ -1672,11 +1705,12 @@ class Mapper(object):
         tups = []
         for state in _sort_states(states):
             mapper = _state_mapper(state)
+
+            if connection_callable:
+                conn = connection_callable(self, state.obj())
+            else:
+                conn = connection
         
-            conn = connection_callable and \
-                connection_callable(self, state.obj()) or \
-                connection
-            
             if 'before_delete' in mapper.extension:
                 mapper.extension.before_delete(mapper, conn, state.obj())
             
@@ -1687,7 +1721,9 @@ class Mapper(object):
                     conn))
 
         table_to_mapper = self._sorted_tables
-
+        
+        compiled_cache = self._compiled_cache
+        
         for table in reversed(table_to_mapper.keys()):
             delete = util.defaultdict(list)
             for state, state_dict, mapper, has_identity, connection in tups:
@@ -1701,17 +1737,18 @@ class Mapper(object):
                 if mapper.version_id_col is not None and \
                             table.c.contains_column(mapper.version_id_col):
                     params[mapper.version_id_col.key] = \
-                                mapper._get_state_attr_by_column(state, state_dict, mapper.version_id_col)
+                                mapper._get_state_attr_by_column(state, state_dict,
+                                        mapper.version_id_col)
 
-            for connection, del_objects in delete.iteritems():
-                mapper = table_to_mapper[table]
+            mapper = table_to_mapper[table]
+            need_version_id = mapper.version_id_col is not None and \
+                table.c.contains_column(mapper.version_id_col)
+
+            def delete_stmt():
                 clause = sql.and_()
                 for col in mapper._pks_by_table[table]:
                     clause.clauses.append(col == sql.bindparam(col.key, type_=col.type))
 
-                need_version_id = mapper.version_id_col is not None and \
-                    table.c.contains_column(mapper.version_id_col)
-
                 if need_version_id:
                     clause.clauses.append(
                         mapper.version_id_col == 
@@ -1721,9 +1758,17 @@ class Mapper(object):
                         )
                     )
 
-                statement = table.delete(clause)
+                return table.delete(clause)
+
+            for connection, del_objects in delete.iteritems():
+                statement = self._memo(('delete', table), delete_stmt)
                 rows = -1
 
+                connection = connection.execution_options(
+                                compiled_cache=compiled_cache.setdefault(
+                                                    connection.engine, 
+                                                    {}))
+
                 if need_version_id and \
                         not connection.dialect.supports_sane_multi_rowcount:
                     # TODO: need test coverage for this [ticket:1761]
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 75fccb86f649a77f2c41e282a1e4bae4ed4eb7e8..ae6624d4739c6ee3978c8108cf7caf8efec0878d 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -785,10 +785,7 @@ class Query(object):
         method is used.
 
         """
-        _execution_options = self._execution_options.copy()
-        for key, value in kwargs.items():
-            _execution_options[key] = value
-        self._execution_options = _execution_options
+        self._execution_options = self._execution_options.union(kwargs)
 
     @_generative()
     def with_lockmode(self, mode):
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index 713cd8c3d2d826cea177528e26c9c5e05ca73e3a..af646aa5f538144f93a7cef56db4b2f582707362 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -68,13 +68,13 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
 
     Options:
 
-    autocommit
-      Defaults to ``False``. When ``True``, the ``Session`` does not keep a
-      persistent transaction running, and will acquire connections from the
-      engine on an as-needed basis, returning them immediately after their
-      use. Flushes will begin and commit (or possibly rollback) their own
-      transaction if no transaction is present. When using this mode, the
-      `session.begin()` method may be used to begin a transaction explicitly.
+    :param autocommit: Defaults to ``False``. When ``True``, the ``Session``
+      does not keep a persistent transaction running, and will acquire
+      connections from the engine on an as-needed basis, returning them
+      immediately after their use. Flushes will begin and commit (or possibly
+      rollback) their own transaction if no transaction is present. When using
+      this mode, the `session.begin()` method may be used to begin a
+      transaction explicitly.
 
       Leaving it on its default value of ``False`` means that the ``Session``
       will acquire a connection and begin a transaction the first time it is
@@ -83,30 +83,27 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
       by any of these methods, the ``Session`` is ready for the next usage,
       which will again acquire and maintain a new connection/transaction.
 
-    autoflush
-      When ``True``, all query operations will issue a ``flush()`` call to
-      this ``Session`` before proceeding. This is a convenience feature so
-      that ``flush()`` need not be called repeatedly in order for database
-      queries to retrieve results. It's typical that ``autoflush`` is used in
-      conjunction with ``autocommit=False``.  In this scenario, explicit calls
-      to ``flush()`` are rarely needed; you usually only need to call
-      ``commit()`` (which flushes) to finalize changes.
-
-    bind
-      An optional ``Engine`` or ``Connection`` to which this ``Session``
-      should be bound. When specified, all SQL operations performed by this
-      session will execute via this connectable.
-
-    binds
-      An optional dictionary, which contains more granular "bind" information
-      than the ``bind`` parameter provides. This dictionary can map individual
-      ``Table`` instances as well as ``Mapper`` instances to individual
-      ``Engine`` or ``Connection`` objects. Operations which proceed relative
-      to a particular ``Mapper`` will consult this dictionary for the direct
-      ``Mapper`` instance as well as the mapper's ``mapped_table`` attribute
-      in order to locate an connectable to use. The full resolution is
-      described in the ``get_bind()`` method of ``Session``. Usage looks
-      like::
+    :param autoflush: When ``True``, all query operations will issue a 
+       ``flush()`` call to this ``Session`` before proceeding. This is a
+       convenience feature so that ``flush()`` need not be called repeatedly
+       in order for database queries to retrieve results. It's typical that
+       ``autoflush`` is used in conjunction with ``autocommit=False``. In this
+       scenario, explicit calls to ``flush()`` are rarely needed; you usually
+       only need to call ``commit()`` (which flushes) to finalize changes.
+
+    :param bind: An optional ``Engine`` or ``Connection`` to which this
+       ``Session`` should be bound. When specified, all SQL operations
+       performed by this session will execute via this connectable.
+
+    :param binds: An optional dictionary which contains more granular "bind"
+       information than the ``bind`` parameter provides. This dictionary can
+       map individual ``Table`` instances as well as ``Mapper`` instances to
+       individual ``Engine`` or ``Connection`` objects. Operations which
+       proceed relative to a particular ``Mapper`` will consult this
+       dictionary for the direct ``Mapper`` instance as well as the mapper's
+       ``mapped_table`` attribute in order to locate an connectable to use.
+       The full resolution is described in the ``get_bind()`` method of
+       ``Session``. Usage looks like::
 
         sess = Session(binds={
             SomeMappedClass: create_engine('postgresql://engine1'),
@@ -116,53 +113,52 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
 
       Also see the ``bind_mapper()`` and ``bind_table()`` methods.
 
-    \class_
-      Specify an alternate class other than ``sqlalchemy.orm.session.Session``
-      which should be used by the returned class.  This is the only argument
-      that is local to the ``sessionmaker()`` function, and is not sent
-      directly to the constructor for ``Session``.
-
-    _enable_transaction_accounting
-      Defaults to ``True``.  A legacy-only flag which when ``False``
-      disables *all* 0.5-style object accounting on transaction boundaries,
-      including auto-expiry of instances on rollback and commit, maintenance of
-      the "new" and "deleted" lists upon rollback, and autoflush
-      of pending changes upon begin(), all of which are interdependent.
-
-    expire_on_commit
-      Defaults to ``True``. When ``True``, all instances will be fully expired after
-      each ``commit()``, so that all attribute/object access subsequent to a completed
-      transaction will load from the most recent database state.
-
-    extension
-      An optional :class:`~sqlalchemy.orm.session.SessionExtension` instance, or
-      a list of such instances, which
-      will receive pre- and post- commit and flush events, as well as a
-      post-rollback event.  User- defined code may be placed within these
-      hooks using a user-defined subclass of ``SessionExtension``.
-
-    query_cls
-      Class which should be used to create new Query objects, as returned
-      by the ``query()`` method.  Defaults to :class:`~sqlalchemy.orm.query.Query`.
-
-    twophase
-      When ``True``, all transactions will be started using
-      :mod:`~sqlalchemy.engine_TwoPhaseTransaction`. During a ``commit()``, after
-      ``flush()`` has been issued for all attached databases, the
-      ``prepare()`` method on each database's ``TwoPhaseTransaction`` will be
-      called. This allows each database to roll back the entire transaction,
-      before each transaction is committed.
-
-    weak_identity_map
-      When set to the default value of ``True``, a weak-referencing map is
-      used; instances which are not externally referenced will be garbage
-      collected immediately. For dereferenced instances which have pending
-      changes present, the attribute management system will create a temporary
-      strong-reference to the object which lasts until the changes are flushed
-      to the database, at which point it's again dereferenced. Alternatively,
-      when using the value ``False``, the identity map uses a regular Python
-      dictionary to store instances. The session will maintain all instances
-      present until they are removed using expunge(), clear(), or purge().
+    :param \class_: Specify an alternate class other than
+       ``sqlalchemy.orm.session.Session`` which should be used by the returned
+       class. This is the only argument that is local to the
+       ``sessionmaker()`` function, and is not sent directly to the
+       constructor for ``Session``.
+
+    :param _enable_transaction_accounting:  Defaults to ``True``.  A
+       legacy-only flag which when ``False`` disables *all* 0.5-style object
+       accounting on transaction boundaries, including auto-expiry of
+       instances on rollback and commit, maintenance of the "new" and
+       "deleted" lists upon rollback, and autoflush of pending changes upon
+       begin(), all of which are interdependent.
+
+    :param expire_on_commit:  Defaults to ``True``. When ``True``, all
+       instances will be fully expired after each ``commit()``, so that all
+       attribute/object access subsequent to a completed transaction will load
+       from the most recent database state.
+
+    :param extension: An optional 
+       :class:`~sqlalchemy.orm.session.SessionExtension` instance, or a list
+       of such instances, which will receive pre- and post- commit and flush
+       events, as well as a post-rollback event. User- defined code may be
+       placed within these hooks using a user-defined subclass of
+       ``SessionExtension``.
+
+    :param query_cls:  Class which should be used to create new Query objects,
+       as returned by the ``query()`` method. Defaults to
+       :class:`~sqlalchemy.orm.query.Query`.
+
+    :param twophase:  When ``True``, all transactions will be started using
+        :mod:`~sqlalchemy.engine_TwoPhaseTransaction`. During a ``commit()``,
+        after ``flush()`` has been issued for all attached databases, the
+        ``prepare()`` method on each database's ``TwoPhaseTransaction`` will
+        be called. This allows each database to roll back the entire
+        transaction, before each transaction is committed.
+
+    :param weak_identity_map:  When set to the default value of ``True``, a
+       weak-referencing map is used; instances which are not externally
+       referenced will be garbage collected immediately. For dereferenced
+       instances which have pending changes present, the attribute management
+       system will create a temporary strong-reference to the object which
+       lasts until the changes are flushed to the database, at which point
+       it's again dereferenced. Alternatively, when using the value ``False``,
+       the identity map uses a regular Python dictionary to store instances.
+       The session will maintain all instances present until they are removed
+       using expunge(), clear(), or purge().
 
     """
     kwargs['bind'] = bind
@@ -516,14 +512,17 @@ class Session(object):
     public_methods = (
         '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
         'close', 'commit', 'connection', 'delete', 'execute', 'expire',
-        'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', 'is_modified', 
+        'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
+        'is_modified', 
         'merge', 'query', 'refresh', 'rollback', 
         'scalar')
-
+    
+    
     def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
                 _enable_transaction_accounting=True,
                  autocommit=False, twophase=False, 
-                 weak_identity_map=True, binds=None, extension=None, query_cls=query.Query):
+                 weak_identity_map=True, binds=None, extension=None,
+                 query_cls=query.Query):
         """Construct a new Session.
 
         Arguments to ``Session`` are described using the
@@ -552,7 +551,6 @@ class Session(object):
         self.extensions = util.to_list(extension) or []
         self._query_cls = query_cls
         self._mapper_flush_opts = {}
-
         if binds is not None:
             for mapperortable, bind in binds.iteritems():
                 if isinstance(mapperortable, (type, Mapper)):
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index ef132389297121a50757e8c943a89f7f39e81874..a802668a11d3e89844255fba6aec31344c7a2a47 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -109,7 +109,10 @@ class Pool(log.Identified):
 
         """
         if logging_name:
-            self.logging_name = logging_name
+            self.logging_name = self._orig_logging_name = logging_name
+        else:
+            self._orig_logging_name = None
+            
         self.logger = log.instance_logger(self, echoflag=echo)
         self._threadconns = threading.local()
         self._creator = creator
@@ -498,6 +501,7 @@ class SingletonThreadPool(Pool):
             pool_size=self.size, 
             recycle=self._recycle, 
             echo=self.echo, 
+            logging_name=self._orig_logging_name,
             use_threadlocal=self._use_threadlocal, 
             listeners=self.listeners)
 
@@ -624,6 +628,7 @@ class QueuePool(Pool):
         return QueuePool(self._creator, pool_size=self._pool.maxsize, 
                           max_overflow=self._max_overflow, timeout=self._timeout, 
                           recycle=self._recycle, echo=self.echo, 
+                          logging_name=self._orig_logging_name,
                           use_threadlocal=self._use_threadlocal, listeners=self.listeners)
 
     def do_return_conn(self, conn):
@@ -730,6 +735,7 @@ class NullPool(Pool):
         return NullPool(self._creator, 
             recycle=self._recycle, 
             echo=self.echo, 
+            logging_name=self._orig_logging_name,
             use_threadlocal=self._use_threadlocal, 
             listeners=self.listeners)
 
@@ -770,6 +776,7 @@ class StaticPool(Pool):
                               use_threadlocal=self._use_threadlocal,
                               reset_on_return=self._reset_on_return,
                               echo=self.echo,
+                              logging_name=self._orig_logging_name,
                               listeners=self.listeners)
 
     def create_connection(self):
@@ -819,6 +826,7 @@ class AssertionPool(Pool):
     def recreate(self):
         self.logger.info("Pool recreating")
         return AssertionPool(self._creator, echo=self.echo, 
+                            logging_name=self._orig_logging_name,
                             listeners=self.listeners)
         
     def do_get(self):
diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py
index 711b03a027d363e61f9eba2c70509225b5bd6fd8..2d64cd8046b4867a3dcf0043ca679d51eb1ad9cd 100644
--- a/test/aaa_profiling/test_memusage.py
+++ b/test/aaa_profiling/test_memusage.py
@@ -1,16 +1,18 @@
 from sqlalchemy.test.testing import eq_
-from sqlalchemy.orm import mapper, relationship, create_session, clear_mappers, sessionmaker
+from sqlalchemy.orm import mapper, relationship, create_session, clear_mappers, \
+                            sessionmaker, class_mapper
 from sqlalchemy.orm.mapper import _mapper_registry
 from sqlalchemy.orm.session import _sessions
 from sqlalchemy.util import jython
 import operator
-from sqlalchemy.test import testing
-from sqlalchemy import MetaData, Integer, String, ForeignKey, PickleType
+from sqlalchemy.test import testing, engines
+from sqlalchemy import MetaData, Integer, String, ForeignKey, PickleType, create_engine
 from sqlalchemy.test.schema import Table, Column
 import sqlalchemy as sa
 from sqlalchemy.sql import column
 from sqlalchemy.test.util import gc_collect
 import gc
+import weakref
 from test.orm import _base
 
 if jython:
@@ -26,6 +28,7 @@ class B(_base.ComparableEntity):
 def profile_memory(func):
     # run the test 50 times.  if length of gc.get_objects()
     # keeps growing, assert false
+    
     def profile(*args):
         gc_collect()
         samples = [0 for x in range(0, 50)]
@@ -33,6 +36,7 @@ def profile_memory(func):
             func(*args)
             gc_collect()
             samples[x] = len(gc.get_objects())
+                
         print "sample gc sizes:", samples
 
         assert len(_sessions) == 0
@@ -130,6 +134,64 @@ class MemUsageTest(EnsureZeroed):
         del m1, m2, m3
         assert_no_mappers()
 
+    @testing.crashes('sqlite', ':memory: connection not suitable here')
+    def test_orm_many_engines(self):
+        metadata = MetaData(testing.db)
+
+        table1 = Table("mytable", metadata,
+            Column('col1', Integer, primary_key=True, test_needs_autoincrement=True),
+            Column('col2', String(30)))
+
+        table2 = Table("mytable2", metadata,
+            Column('col1', Integer, primary_key=True, test_needs_autoincrement=True),
+            Column('col2', String(30)),
+            Column('col3', Integer, ForeignKey("mytable.col1")))
+
+        metadata.create_all()
+
+        m1 = mapper(A, table1, properties={
+            "bs":relationship(B, cascade="all, delete", order_by=table2.c.col1)},
+            order_by=table1.c.col1)
+        m2 = mapper(B, table2)
+
+        m3 = mapper(A, table1, non_primary=True)
+
+        @profile_memory
+        def go():
+            engine = engines.testing_engine(options={'logging_name':'FOO', 'pool_logging_name':'BAR'})
+            sess = create_session(bind=engine)
+            
+            a1 = A(col2="a1")
+            a2 = A(col2="a2")
+            a3 = A(col2="a3")
+            a1.bs.append(B(col2="b1"))
+            a1.bs.append(B(col2="b2"))
+            a3.bs.append(B(col2="b3"))
+            for x in [a1,a2,a3]:
+                sess.add(x)
+            sess.flush()
+            sess.expunge_all()
+
+            alist = sess.query(A).all()
+            eq_(
+                [
+                    A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
+                    A(col2="a2", bs=[]),
+                    A(col2="a3", bs=[B(col2="b3")])
+                ],
+                alist)
+
+            for a in alist:
+                sess.delete(a)
+            sess.flush()
+            sess.close()
+            engine.dispose()
+        go()
+
+        metadata.drop_all()
+        del m1, m2, m3
+        assert_no_mappers()
+
     def test_mapper_reset(self):
         metadata = MetaData(testing.db)
 
diff --git a/test/aaa_profiling/test_zoomark_orm.py b/test/aaa_profiling/test_zoomark_orm.py
index 4d816cfc1b01936b300a4ed108743302fb678266..0820d7cc46de7f49e7eb346363b19628a371ca5b 100644
--- a/test/aaa_profiling/test_zoomark_orm.py
+++ b/test/aaa_profiling/test_zoomark_orm.py
@@ -295,7 +295,7 @@ class ZooMarkTest(TestBase):
     def test_profile_1a_populate(self):
         self.test_baseline_1a_populate()
 
-    @profiling.function_call_count(848)
+    @profiling.function_call_count(640)
     def test_profile_2_insert(self):
         self.test_baseline_2_insert()
 
diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py
index 3f3f0e2d565a0441d6405aa478ecf588b02a564a..6b2ba2010cae962268da157695b5b379221dd6c2 100644
--- a/test/engine/test_execute.py
+++ b/test/engine/test_execute.py
@@ -112,6 +112,21 @@ class ExecuteTest(TestBase):
         eq_(testing.db.execute(users_autoinc.select()).fetchall(), [
             (1, None)
         ])
+        
+    def test_engine_level_options(self):
+        eng = engines.testing_engine(options={
+            'execution_options':{'foo':'bar'}
+        })
+        conn = eng.contextual_connect()
+        eq_(conn._execution_options['foo'], 'bar')
+        eq_(conn.execution_options(bat='hoho')._execution_options['foo'], 'bar')
+        eq_(conn.execution_options(bat='hoho')._execution_options['bat'], 'hoho')
+        eq_(conn.execution_options(foo='hoho')._execution_options['foo'], 'hoho')
+        
+        eng.update_execution_options(foo='hoho')
+        conn = eng.contextual_connect()
+        eq_(conn._execution_options['foo'], 'hoho')
+        
 
 class CompiledCacheTest(TestBase):
     @classmethod
@@ -171,6 +186,10 @@ class LogTest(TestBase):
         }
         eng = engines.testing_engine(options=options)
         self._test_logger(eng, "myenginename", "mypoolname")
+        
+        eng.dispose()
+        self._test_logger(eng, "myenginename", "mypoolname")
+        
 
     def test_unnamed_logger(self):
         eng = engines.testing_engine(options={'echo':'debug', 'echo_pool':'debug'})