author    Mike Bayer <mike_mp@zzzcomputing.com>
          Sun, 28 Feb 2010 17:43:10 +0000 (17:43 +0000)
committer Mike Bayer <mike_mp@zzzcomputing.com>
          Sun, 28 Feb 2010 17:43:10 +0000 (17:43 +0000)

- orm: Removed a lot of logging that nobody really cares about;
  the logging that remains will respond to live changes in the
  log level.  No significant overhead is added.  [ticket:1719]

- engine: Opened up logging a bit such that isEnabledFor() is called
  more often, so that changes to the log level for engine/pool
  will be reflected on the next connect.  This adds a small
  amount of method call overhead.  It's negligible and will make
  life a lot easier for all those situations when logging
  just happens to be configured after create_engine() is called.
  [ticket:1719]
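In short: the "should I log?" test went from a boolean captured once at
class/instance construction time to a callable that consults the logger on
every use.  A minimal sketch of the difference (illustrative only, not the
actual SQLAlchemy code):

    import logging

    logger = logging.getLogger("sqlalchemy.engine")

    # before: the level is frozen into a bool at construction time, so
    # logging configured after this point is silently ignored
    should_log_info = logger.isEnabledFor(logging.INFO)

    # after: the level is consulted on each call, so late config works
    should_log_info = lambda: logger.isEnabledFor(logging.INFO)

    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
    assert should_log_info()   # True, even though the level changed late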

13 files changed:
CHANGES
lib/sqlalchemy/dialects/maxdb/base.py
lib/sqlalchemy/engine/base.py
lib/sqlalchemy/log.py
lib/sqlalchemy/orm/mapper.py
lib/sqlalchemy/orm/properties.py
lib/sqlalchemy/orm/strategies.py
lib/sqlalchemy/orm/unitofwork.py
lib/sqlalchemy/pool.py
lib/sqlalchemy/test/noseplugin.py
test/aaa_profiling/test_pool.py
test/aaa_profiling/test_zoomark.py
test/aaa_profiling/test_zoomark_orm.py

diff --git a/CHANGES b/CHANGES
index d1ccecb5efda8c44246823bb0d26fa7796424bae..c4495552a30204ea23fbdddc16c585321b98bf88 100644
--- a/CHANGES
+++ b/CHANGES
@@ -19,6 +19,10 @@ CHANGES
     internally. The formerly "pending" objects are now expunged
     first. [ticket:1674]
 
+  - Removed a lot of logging that nobody really cares about,
+    logging that remains will respond to live changes in the
+    log level.  No significant overhead is added.  [ticket:1719]
+    
   - session.merge() will not expire existing scalar attributes
     on an existing target if the target has a value for that
     attribute, even if the incoming merged doesn't have
@@ -186,6 +190,14 @@ CHANGES
     Note that it is *not* built/installed by default.
     See README for installation instructions.
 
+  - Opened up logging a bit such that isEnabledFor() is called 
+    more often, so that changes to the log level for engine/pool
+    will be reflected on next connect.   This adds a small 
+    amount of method call overhead.  It's negligible and will make
+    life a lot easier for all those situations when logging 
+    just happens to be configured after create_engine() is called.
+    [ticket:1719]
+    
   - The assert_unicode flag is deprecated.  SQLAlchemy will raise
     a warning in all cases where it is asked to encode a non-unicode
     Python string, as well as when a Unicode or UnicodeType type
diff --git a/lib/sqlalchemy/dialects/maxdb/base.py b/lib/sqlalchemy/dialects/maxdb/base.py
index f409f3213b87bdc4f4bc0191acd3feb19d2f5378..504c31209d428fffb2d57dfd12eafe1cef47c0e5 100644
--- a/lib/sqlalchemy/dialects/maxdb/base.py
+++ b/lib/sqlalchemy/dialects/maxdb/base.py
@@ -390,13 +390,9 @@ class MaxDBExecutionContext(default.DefaultExecutionContext):
                     sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % (
                         self.compiled.preparer.format_table(table))
 
-                if self.connection.engine._should_log_info:
-                    self.connection.engine.logger.info(sql)
                 rs = self.cursor.execute(sql)
                 id = rs.fetchone()[0]
 
-                if self.connection.engine._should_log_debug:
-                    self.connection.engine.logger.debug([id])
                 if not self._last_inserted_ids:
                     # This shouldn't ever be > 1?  Right?
                     self._last_inserted_ids = \
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index 578bd58d70517910c029b98de838e537557d560b..b4f2524d6e13ba22042df9045f3ae917109ec7a4 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -747,6 +747,7 @@ class Connection(Connectable):
         self.__savepoint_seq = 0
         self.__branch = _branch
         self.__invalid = False
+        self._echo = self.engine._should_log_info()
         if _execution_options:
             self._execution_options = self._execution_options.union(_execution_options)
 
@@ -941,7 +942,7 @@ class Connection(Connectable):
         return self.__transaction is not None
 
     def _begin_impl(self):
-        if self.engine._should_log_info:
+        if self._echo:
             self.engine.logger.info("BEGIN")
         try:
             self.engine.dialect.do_begin(self.connection)
@@ -952,8 +953,9 @@ class Connection(Connectable):
     def _rollback_impl(self):
         # use getattr() for is_valid to support exceptions raised in dialect initializer, 
         # where we do not yet have the pool wrappers plugged in
-        if not self.closed and not self.invalidated and getattr(self.__connection, 'is_valid', False):
-            if self.engine._should_log_info:
+        if not self.closed and not self.invalidated and \
+                        getattr(self.__connection, 'is_valid', False):
+            if self._echo:
                 self.engine.logger.info("ROLLBACK")
             try:
                 self.engine.dialect.do_rollback(self.connection)
@@ -965,7 +967,7 @@ class Connection(Connectable):
             self.__transaction = None
 
     def _commit_impl(self):
-        if self.engine._should_log_info:
+        if self._echo:
             self.engine.logger.info("COMMIT")
         try:
             self.engine.dialect.do_commit(self.connection)
@@ -1184,7 +1186,7 @@ class Connection(Connectable):
             raise
 
     def _cursor_execute(self, cursor, statement, parameters, context=None):
-        if self.engine._should_log_info:
+        if self._echo:
             self.engine.logger.info(statement)
             self.engine.logger.info("%r", parameters)
         try:
@@ -1194,7 +1196,7 @@ class Connection(Connectable):
             raise
 
     def _cursor_executemany(self, cursor, statement, parameters, context=None):
-        if self.engine._should_log_info:
+        if self._echo:
             self.engine.logger.info(statement)
             self.engine.logger.info("%r", parameters)
         try:
@@ -1898,9 +1900,11 @@ class ResultProxy(object):
         self.closed = False
         self.cursor = context.cursor
         self.connection = context.root_connection
-        self._echo = context.engine._should_log_info
+        self._echo = self.connection._echo and \
+                        context.engine._should_log_debug()
         self._init_metadata()
 
+    
     def _init_metadata(self):
         metadata = self._cursor_description()
         if metadata is None:
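The engine/base.py change above caches the INFO check once per connect as
Connection._echo, so per-statement echo tests stay a plain attribute lookup,
while ResultProxy echoes row data only when its connection echoes and DEBUG
is also enabled.  A simplified sketch of that wiring (not the real classes):

    class Connection(object):
        def __init__(self, engine):
            self.engine = engine
            # evaluated once per connect; the next connect picks up
            # any log level changes made in the meantime
            self._echo = engine._should_log_info()

        def _cursor_execute(self, cursor, statement, parameters):
            if self._echo:   # cheap attribute test on every statement
                self.engine.logger.info(statement)
                self.engine.logger.info("%r", parameters)
            cursor.execute(statement, parameters)

    class ResultProxy(object):
        def __init__(self, context):
            # row-level echo requires both the connection's echo flag
            # and DEBUG enabled on the engine logger
            self._echo = context.root_connection._echo and \
                            context.engine._should_log_debug()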
diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py
index 62e0739f7c7b123cd319c2b2abe8e0a490d91721..eac90854cb67eec4a916032ae831bcd52f3e08e3 100644
--- a/lib/sqlalchemy/log.py
+++ b/lib/sqlalchemy/log.py
@@ -53,17 +53,11 @@ def class_logger(cls, enable=False):
         logger.setLevel(logging.DEBUG)
     elif enable == 'info':
         logger.setLevel(logging.INFO)
-    cls._should_log_debug = logger.isEnabledFor(logging.DEBUG)
-    cls._should_log_info = logger.isEnabledFor(logging.INFO)
+    cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG)
+    cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO)
     cls.logger = logger
     _logged_classes.add(cls)
     
-def _refresh_class_loggers():
-    for cls in _logged_classes:
-        logger = logging.getLogger(cls.__module__ + "." + cls.__name__)
-        cls._should_log_debug = logger.isEnabledFor(logging.DEBUG)
-        cls._should_log_info = logger.isEnabledFor(logging.INFO)
-        
 def instance_logger(instance, echoflag=None):
     """create a logger for an instance.
     
@@ -93,8 +87,8 @@ def instance_logger(instance, echoflag=None):
             l.setLevel(logging.WARN)
     else:
         l = logging.getLogger(name)
-    instance._should_log_debug = l.isEnabledFor(logging.DEBUG)
-    instance._should_log_info = l.isEnabledFor(logging.INFO)
+    instance._should_log_debug = lambda: l.isEnabledFor(logging.DEBUG)
+    instance._should_log_info = lambda: l.isEnabledFor(logging.INFO)
     return l
 
 class echo_property(object):
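Note the asymmetry in the two lambdas above: the class-level version takes
self, because a function assigned to a class becomes a bound method on
attribute access, while the instance-level version takes no arguments,
because attributes set directly on an instance are never bound.  A
standalone illustration of that rule (hypothetical class, same idea):

    import logging

    class Widget(object):
        pass

    logger = logging.getLogger("example.Widget")

    # set on the class: instance access binds it, so `self` is required
    Widget._should_log_debug = \
        lambda self: logger.isEnabledFor(logging.DEBUG)

    w = Widget()
    w._should_log_debug()        # self is passed implicitly

    # set on the instance: plain attribute lookup, nothing is bound
    w._should_log_debug = lambda: logger.isEnabledFor(logging.DEBUG)
    w._should_log_debug()        # no arguments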
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 21f32780496d56d20c112a80cc250142d9fb11dc..ae80a2a4d84af07e61898b442ff2f02a8120b65f 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -458,7 +458,7 @@ class Mapper(object):
                     "key columns for mapped table '%s'" % (self, self.mapped_table.description))
 
             self.primary_key = primary_key
-            self._log("Identified primary key columns: " + str(primary_key))
+            self._log("Identified primary key columns: %s", primary_key)
 
     def _configure_properties(self):
         
@@ -523,7 +523,7 @@ class Mapper(object):
             self._configure_property(key, ConcreteInheritedProperty(), init=init, setparent=True)
             
     def _configure_property(self, key, prop, init=True, setparent=True):
-        self._log("_configure_property(%s, %s)" % (key, prop.__class__.__name__))
+        self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
 
         if not isinstance(prop, MapperProperty):
             # we were passed a Column or a list of Columns; generate a ColumnProperty
@@ -727,7 +727,7 @@ class Mapper(object):
         self._log("_post_configure_properties() started")
         l = [(key, prop) for key, prop in self._props.iteritems()]
         for key, prop in l:
-            self._log("initialize prop " + key)
+            self._log("initialize prop %s", key)
             
             if prop.parent is self and not prop._compile_started:
                 prop.init()
@@ -1242,7 +1242,8 @@ class Mapper(object):
             ret[t] = table_to_mapper[t]
         return ret
 
-    def _save_obj(self, states, uowtransaction, postupdate=False, post_update_cols=None, single=False):
+    def _save_obj(self, states, uowtransaction, postupdate=False, 
+                                post_update_cols=None, single=False):
         """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects.
 
         This is called within the context of a UOWTransaction during a
@@ -1256,9 +1257,6 @@ class Mapper(object):
         mapper which does not inherit from any other mapper.
         
         """
-        if self._should_log_debug:
-            self._log_debug("_save_obj() start, " + (single and "non-batched" or "batched"))
-
         # if batch=false, call _save_obj separately for each object
         if not single and not self.batch:
             for state in _sort_states(states):
@@ -1316,13 +1314,13 @@ class Mapper(object):
                     existing = attributes.instance_state(instance)
                     if not uowtransaction.is_deleted(existing):
                         raise orm_exc.FlushError(
-                            "New instance %s with identity key %s conflicts with persistent instance %s" % 
+                            "New instance %s with identity key %s conflicts "
+                            "with persistent instance %s" % 
                             (state_str(state), instance_key, state_str(existing)))
                             
-                    if self._should_log_debug:
-                        self._log_debug(
-                            "detected row switch for identity %s.  will update %s, remove %s from "
-                            "transaction", instance_key, state_str(state), state_str(existing))
+                    self._log_debug(
+                        "detected row switch for identity %s.  will update %s, remove %s from "
+                        "transaction", instance_key, state_str(state), state_str(existing))
                             
                     # remove the "delete" flag from the existing element
                     uowtransaction.set_row_switch(existing)
@@ -1340,10 +1338,6 @@ class Mapper(object):
                     
                 pks = mapper._pks_by_table[table]
                 
-                if self._should_log_debug:
-                    self._log_debug("_save_obj() table '%s' instance %s identity %s",
-                                    table.name, state_str(state), str(instance_key))
-
                 isinsert = not has_identity and not postupdate and state not in row_switches
                 
                 params = {}
@@ -1550,9 +1544,6 @@ class Mapper(object):
         flush operation.
 
         """
-        if self._should_log_debug:
-            self._log_debug("_delete_obj() start")
-
         if 'connection_callable' in uowtransaction.mapper_flush_opts:
             connection_callable = uowtransaction.mapper_flush_opts['connection_callable']
             tups = [(state, _state_mapper(state), connection_callable(self, state.obj())) for state in _sort_states(states)]
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py
index 20a1319f8ef5eadb17f414e434f71fffdc7ac521..56a03fc7cb733048bcfdeeecee026fe214f84f6e 100644
--- a/lib/sqlalchemy/orm/properties.py
+++ b/lib/sqlalchemy/orm/properties.py
@@ -1091,13 +1091,12 @@ class RelationProperty(StrategizedProperty):
         
         
     def _post_init(self):
-        if self._should_log_info:
-            self.logger.info("%s setup primary join %s", self, self.primaryjoin)
-            self.logger.info("%s setup secondary join %s", self, self.secondaryjoin)
-            self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs))
-            self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or [])))
-            self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs))
-            self.logger.info("%s relation direction %s", self, self.direction)
+        self.logger.info("%s setup primary join %s", self, self.primaryjoin)
+        self.logger.info("%s setup secondary join %s", self, self.secondaryjoin)
+        self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs))
+        self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or [])))
+        self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs))
+        self.logger.info("%s relation direction %s", self, self.direction)
         
         if self.uselist is None:
             self.uselist = self.direction is not MANYTOONE
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py
index 17b1fd8e8cb79521a99708afeaa7ada4311d80f8..cbd48303c2e652fa42c2a94ac0609f71c3fb3f62 100644
--- a/lib/sqlalchemy/orm/strategies.py
+++ b/lib/sqlalchemy/orm/strategies.py
@@ -267,7 +267,7 @@ class LoadDeferredColumns(object):
         # narrow the keys down to just those which have no history
         group = [k for k in toload if k in state.unmodified]
 
-        if strategy._should_log_debug:
+        if strategy._should_log_debug():
             strategy.logger.debug(
                     "deferred load %s group %s",
                     (mapperutil.state_attribute_str(state, self.key), 
@@ -513,7 +513,7 @@ class LoadLazyAttribute(object):
         if kw.get('passive') is attributes.PASSIVE_NO_FETCH and not strategy.use_get:
             return attributes.PASSIVE_NO_RESULT
 
-        if strategy._should_log_debug:
+        if strategy._should_log_debug():
             strategy.logger.debug("loading %s", 
                                     mapperutil.state_attribute_str(state, self.key))
         
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py
index d2901a49ff14d59a7bf4aeddc3a7b13fd112d4ae..4a694bc33320837a11308286b80df4ffec695bcd 100644
--- a/lib/sqlalchemy/orm/unitofwork.py
+++ b/lib/sqlalchemy/orm/unitofwork.py
@@ -122,19 +122,13 @@ class UOWTransaction(object):
         else:
             return history.as_state()
 
-    def register_object(self, state, isdelete=False, listonly=False, postupdate=False, post_update_cols=None):
+    def register_object(self, state, isdelete=False, 
+                            listonly=False, postupdate=False, post_update_cols=None):
         
         # if object is not in the overall session, do nothing
         if not self.session._contains_state(state):
-            if self._should_log_debug:
-                self.logger.debug("object %s not part of session, not registering for flush",
-                                        mapperutil.state_str(state))
             return
 
-        if self._should_log_debug:
-            self.logger.debug("register object for flush: %s isdelete=%s listonly=%s postupdate=%s",
-                                    mapperutil.state_str(state), isdelete, listonly, postupdate)
-
         mapper = _state_mapper(state)
 
         task = self.get_task_by_mapper(mapper)
@@ -256,11 +250,10 @@ class UOWTransaction(object):
                 break
 
         tasks = self._sort_dependencies()
-        if self._should_log_info:
+        if self._should_log_info():
             self.logger.info("Task dump:\n%s", self._dump(tasks))
         UOWExecutor().execute(self, tasks)
-        if self._should_log_info:
-            self.logger.info("Execute Complete")
+        self.logger.info("Execute Complete")
 
     def _dump(self, tasks):
         from uowdumper import UOWDumper
@@ -296,16 +289,14 @@ class UOWTransaction(object):
         for item, cycles in nodes:
             task = self.get_task_by_mapper(item)
             if cycles:
-                for t in task._sort_circular_dependencies(self, [self.get_task_by_mapper(i) for i in cycles]):
+                for t in task._sort_circular_dependencies(
+                                        self, 
+                                        [self.get_task_by_mapper(i) for i in cycles]
+                                        ):
                     ret.append(t)
             else:
                 ret.append(task)
 
-        if self._should_log_debug:
-            self.logger.debug("Dependent tuples:\n" + "\n".join(
-                    "(%s->%s)" % (d[0].class_.__name__, d[1].class_.__name__)
-                    for d in self.dependencies))
-            self.logger.debug("Dependency sort:\n%s", ret)
         return ret
 
 log.class_logger(UOWTransaction)
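The one guard flush() keeps is the pattern worth keeping: stdlib logging
already defers %-formatting, but here the argument itself is expensive to
build, so the explicit level check still pays.  Roughly:

    # worth guarding: _dump() walks every task even if nothing is logged
    if self._should_log_info():
        self.logger.info("Task dump:\n%s", self._dump(tasks))

    # no guard needed: a constant message costs almost nothing to filter
    self.logger.info("Execute Complete")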
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index 92df7dfeb09f400f17d0a0164c270703728fd5c5..6dbadcb3b1b8d2dfe937500a7b71fbf53eb2b0ff 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -207,24 +207,21 @@ class _ConnectionRecord(object):
 
     def close(self):
         if self.connection is not None:
-            if self.__pool._should_log_debug:
-                self.__pool.logger.debug("Closing connection %r", self.connection)
+            self.__pool.logger.debug("Closing connection %r", self.connection)
             try:
                 self.connection.close()
             except (SystemExit, KeyboardInterrupt):
                 raise
             except:
-                if self.__pool._should_log_debug:
-                    self.__pool.logger.debug("Exception closing connection %r",
-                                    self.connection)
+                self.__pool.logger.debug("Exception closing connection %r",
+                                self.connection)
 
     def invalidate(self, e=None):
-        if self.__pool._should_log_info:
-            if e is not None:
-                self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)",
-                                self.connection, e.__class__.__name__, e)
-            else:
-                self.__pool.logger.info("Invalidate connection %r", self.connection)
+        if e is not None:
+            self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)",
+                            self.connection, e.__class__.__name__, e)
+        else:
+            self.__pool.logger.info("Invalidate connection %r", self.connection)
         self.__close()
         self.connection = None
 
@@ -236,9 +233,8 @@ class _ConnectionRecord(object):
                 for l in self.__pool._on_connect:
                     l.connect(self.connection, self)
         elif (self.__pool._recycle > -1 and time.time() - self.starttime > self.__pool._recycle):
-            if self.__pool._should_log_info:
-                self.__pool.logger.info("Connection %r exceeded timeout; recycling",
-                                self.connection)
+            self.__pool.logger.info("Connection %r exceeded timeout; recycling",
+                            self.connection)
             self.__close()
             self.connection = self.__connect()
             self.info.clear()
@@ -249,13 +245,11 @@ class _ConnectionRecord(object):
 
     def __close(self):
         try:
-            if self.__pool._should_log_debug:
-                self.__pool.logger.debug("Closing connection %r", self.connection)
+            self.__pool.logger.debug("Closing connection %r", self.connection)
             self.connection.close()
         except Exception, e:
-            if self.__pool._should_log_debug:
-                self.__pool.logger.debug("Connection %r threw an error on close: %s" %
-                                (self.connection, e))
+            self.__pool.logger.debug("Connection %r threw an error on close: %s",
+                            self.connection, e)
             if isinstance(e, (SystemExit, KeyboardInterrupt)):
                 raise
 
@@ -263,12 +257,10 @@ class _ConnectionRecord(object):
         try:
             self.starttime = time.time()
             connection = self.__pool._creator()
-            if self.__pool._should_log_debug:
-                self.__pool.logger.debug("Created new connection %r", connection)
+            self.__pool.logger.debug("Created new connection %r", connection)
             return connection
         except Exception, e:
-            if self.__pool._should_log_debug:
-                self.__pool.logger.debug("Error on connect(): %s", e)
+            self.__pool.logger.debug("Error on connect(): %s", e)
             raise
 
 
@@ -292,8 +284,7 @@ def _finalize_fairy(connection, connection_record, pool, ref=None):
                 raise
     if connection_record is not None:
         connection_record.fairy = None
-        if pool._should_log_debug:
-            pool.logger.debug("Connection %r being returned to pool", connection)
+        pool.logger.debug("Connection %r being returned to pool", connection)
         if pool._on_checkin:
             for l in pool._on_checkin:
                 l.checkin(connection, connection_record)
@@ -318,9 +309,8 @@ class _ConnectionFairy(object):
             self.connection = None # helps with endless __getattr__ loops later on
             self._connection_record = None
             raise
-        if self._pool._should_log_debug:
-            self._pool.logger.debug("Connection %r checked out from pool" %
-                           self.connection)
+        self._pool.logger.debug("Connection %r checked out from pool" %
+                       self.connection)
 
     @property
     def _logger(self):
@@ -386,15 +376,13 @@ class _ConnectionFairy(object):
                     l.checkout(self.connection, self._connection_record, self)
                 return self
             except exc.DisconnectionError, e:
-                if self._pool._should_log_info:
-                    self._pool.logger.info(
-                    "Disconnection detected on checkout: %s", e)
+                self._pool.logger.info(
+                "Disconnection detected on checkout: %s", e)
                 self._connection_record.invalidate(e)
                 self.connection = self._connection_record.get_connection()
                 attempts -= 1
 
-        if self._pool._should_log_info:
-            self._pool.logger.info("Reconnection attempts exhausted on checkout")
+        self._pool.logger.info("Reconnection attempts exhausted on checkout")
         self.invalidate()
         raise exc.InvalidRequestError("This connection is closed")
 
@@ -452,7 +440,7 @@ class _CursorFairy(object):
                 ex_text = str(e)
             except TypeError:
                 ex_text = repr(e)
-            self.__parent._logger.warn("Error closing cursor: " + ex_text)
+            self.__parent._logger.warn("Error closing cursor: %s", ex_text)
 
             if isinstance(e, (SystemExit, KeyboardInterrupt)):
                 raise
@@ -673,8 +661,7 @@ class QueuePool(Pool):
                 break
 
         self._overflow = 0 - self.size()
-        if self._should_log_info:
-            self.logger.info("Pool disposed. " + self.status())
+        self.logger.info("Pool disposed. %s", self.status())
 
     def status(self):
         tup = (self.size(), self.checkedin(), self.overflow(), self.checkedout())
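Dropping the explicit _should_log_debug guards throughout pool.py is safe
because logging.Logger.debug() performs its own isEnabledFor() check before
a record is even created, so %-style arguments are never formatted for
filtered calls; only the (cheap) method call itself remains.  For example:

    import logging

    log = logging.getLogger("sqlalchemy.pool")

    class Conn(object):
        def __repr__(self):
            # runs only if the record is actually emitted
            return "<Conn at 0x%x>" % id(self)

    # at the default WARN level this is a near no-op: the level check
    # fails inside debug() and __repr__ is never invoked
    log.debug("Connection %r checked out from pool", Conn())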
diff --git a/lib/sqlalchemy/test/noseplugin.py b/lib/sqlalchemy/test/noseplugin.py
index 2ba6f751d5f1be5c9d6a7f7dede5702fa859caf2..5e8e21e8fdcc4c1ce3b6d7a9a211324b156346be 100644
--- a/lib/sqlalchemy/test/noseplugin.py
+++ b/lib/sqlalchemy/test/noseplugin.py
@@ -85,8 +85,6 @@ class NoseSQLAlchemy(Plugin):
         for fn in post_configure:
             fn(self.options, file_config)
         
-        sqla_log._refresh_class_loggers()
-        
     def describeTest(self, test):
         return ""
         
diff --git a/test/aaa_profiling/test_pool.py b/test/aaa_profiling/test_pool.py
index 18ae0c1b38fbeeeaf2a1cecc31877fd7505e57a0..33631ad4c7150ce07d3a0ee020d2b0171f8ac779 100644
--- a/test/aaa_profiling/test_pool.py
+++ b/test/aaa_profiling/test_pool.py
@@ -18,7 +18,7 @@ class QueuePoolTest(TestBase, AssertsExecutionResults):
                          use_threadlocal=True)
 
 
-    @profiling.function_call_count(54, {'2.4': 36, '3.0':57, '3.1':57})
+    @profiling.function_call_count(64, {'2.4': 36, '3.0':57, '3.1':57})
     def test_first_connect(self):
         conn = pool.connect()
 
@@ -26,7 +26,7 @@ class QueuePoolTest(TestBase, AssertsExecutionResults):
         conn = pool.connect()
         conn.close()
 
-        @profiling.function_call_count(29, {'2.4': 21})
+        @profiling.function_call_count(32, {'2.4': 21})
         def go():
             conn2 = pool.connect()
             return conn2
diff --git a/test/aaa_profiling/test_zoomark.py b/test/aaa_profiling/test_zoomark.py
index 706f8e470b7ec323fd5f07d1b505fb3fe83de5ad..d7ccd2e6dfab7acce0056e4b2ebee1eb5bbdaeff 100644
--- a/test/aaa_profiling/test_zoomark.py
+++ b/test/aaa_profiling/test_zoomark.py
@@ -343,7 +343,7 @@ class ZooMarkTest(TestBase):
     def test_profile_4_expressions(self):
         self.test_baseline_4_expressions()
 
-    @profiling.function_call_count(1241, {'2.4': 854})
+    @profiling.function_call_count(1311, {'2.4': 854})
     def test_profile_5_aggregates(self):
         self.test_baseline_5_aggregates()
 
diff --git a/test/aaa_profiling/test_zoomark_orm.py b/test/aaa_profiling/test_zoomark_orm.py
index 220925b6a380acf8df61e7f1b3f66346c53e451f..7029944a252cf138de04d6d6de8b9ce0105f1e28 100644
--- a/test/aaa_profiling/test_zoomark_orm.py
+++ b/test/aaa_profiling/test_zoomark_orm.py
@@ -309,7 +309,7 @@ class ZooMarkTest(TestBase):
     def test_profile_4_expressions(self):
         self.test_baseline_4_expressions()
 
-    @profiling.function_call_count(1240)
+    @profiling.function_call_count(1313)
     def test_profile_5_aggregates(self):
         self.test_baseline_5_aggregates()