From ba015e62cc1c688b11a7b8f916867f123a6fe206 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 28 Feb 2010 17:43:10 +0000 Subject: [PATCH] - orm: Removed a lot of logging that nobody really cares about, logging that remains will respond to live changes in the log level. No significant overhead is added. [ticket:1719] - engine: Opened up logging a bit such that isEnabledFor() is called more often, so that changes to the log level for engine/pool will be reflected on next connect. This adds a small amount of method call overhead. It's negligible and will make life a lot easier for all those situations when logging just happens to be configured after create_engine() is called. [ticket:1719] --- CHANGES | 12 ++++++ lib/sqlalchemy/dialects/maxdb/base.py | 4 -- lib/sqlalchemy/engine/base.py | 18 +++++--- lib/sqlalchemy/log.py | 14 ++---- lib/sqlalchemy/orm/mapper.py | 29 +++++-------- lib/sqlalchemy/orm/properties.py | 13 +++--- lib/sqlalchemy/orm/strategies.py | 4 +- lib/sqlalchemy/orm/unitofwork.py | 25 ++++------- lib/sqlalchemy/pool.py | 59 ++++++++++---------------- lib/sqlalchemy/test/noseplugin.py | 2 - test/aaa_profiling/test_pool.py | 4 +- test/aaa_profiling/test_zoomark.py | 2 +- test/aaa_profiling/test_zoomark_orm.py | 2 +- 13 files changed, 80 insertions(+), 108 deletions(-) diff --git a/CHANGES b/CHANGES index d1ccecb5ef..c4495552a3 100644 --- a/CHANGES +++ b/CHANGES @@ -19,6 +19,10 @@ CHANGES internally. The formerly "pending" objects are now expunged first. [ticket:1674] + - Removed a lot of logging that nobody really cares about, + logging that remains will respond to live changes in the + log level. No significant overhead is added. [ticket:1719] + - session.merge() will not expire existing scalar attributes on an existing target if the target has a value for that attribute, even if the incoming merged doesn't have @@ -186,6 +190,14 @@ CHANGES Note that it is *not* built/installed by default. See README for installation instructions. + - Opened up logging a bit such that isEnabledFor() is called + more often, so that changes to the log level for engine/pool + will be reflected on next connect. This adds a small + amount of method call overhead. It's negligible and will make + life a lot easier for all those situations when logging + just happens to be configured after create_engine() is called. + [ticket:1719] + - The assert_unicode flag is deprecated. SQLAlchemy will raise a warning in all cases where it is asked to encode a non-unicode Python string, as well as when a Unicode or UnicodeType type diff --git a/lib/sqlalchemy/dialects/maxdb/base.py b/lib/sqlalchemy/dialects/maxdb/base.py index f409f3213b..504c31209d 100644 --- a/lib/sqlalchemy/dialects/maxdb/base.py +++ b/lib/sqlalchemy/dialects/maxdb/base.py @@ -390,13 +390,9 @@ class MaxDBExecutionContext(default.DefaultExecutionContext): sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % ( self.compiled.preparer.format_table(table)) - if self.connection.engine._should_log_info: - self.connection.engine.logger.info(sql) rs = self.cursor.execute(sql) id = rs.fetchone()[0] - if self.connection.engine._should_log_debug: - self.connection.engine.logger.debug([id]) if not self._last_inserted_ids: # This shouldn't ever be > 1? Right? 
self._last_inserted_ids = \ diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 578bd58d70..b4f2524d6e 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -747,6 +747,7 @@ class Connection(Connectable): self.__savepoint_seq = 0 self.__branch = _branch self.__invalid = False + self._echo = self.engine._should_log_info() if _execution_options: self._execution_options = self._execution_options.union(_execution_options) @@ -941,7 +942,7 @@ class Connection(Connectable): return self.__transaction is not None def _begin_impl(self): - if self.engine._should_log_info: + if self._echo: self.engine.logger.info("BEGIN") try: self.engine.dialect.do_begin(self.connection) @@ -952,8 +953,9 @@ class Connection(Connectable): def _rollback_impl(self): # use getattr() for is_valid to support exceptions raised in dialect initializer, # where we do not yet have the pool wrappers plugged in - if not self.closed and not self.invalidated and getattr(self.__connection, 'is_valid', False): - if self.engine._should_log_info: + if not self.closed and not self.invalidated and \ + getattr(self.__connection, 'is_valid', False): + if self._echo: self.engine.logger.info("ROLLBACK") try: self.engine.dialect.do_rollback(self.connection) @@ -965,7 +967,7 @@ class Connection(Connectable): self.__transaction = None def _commit_impl(self): - if self.engine._should_log_info: + if self._echo: self.engine.logger.info("COMMIT") try: self.engine.dialect.do_commit(self.connection) @@ -1184,7 +1186,7 @@ class Connection(Connectable): raise def _cursor_execute(self, cursor, statement, parameters, context=None): - if self.engine._should_log_info: + if self._echo: self.engine.logger.info(statement) self.engine.logger.info("%r", parameters) try: @@ -1194,7 +1196,7 @@ class Connection(Connectable): raise def _cursor_executemany(self, cursor, statement, parameters, context=None): - if self.engine._should_log_info: + if self._echo: self.engine.logger.info(statement) self.engine.logger.info("%r", parameters) try: @@ -1898,9 +1900,11 @@ class ResultProxy(object): self.closed = False self.cursor = context.cursor self.connection = context.root_connection - self._echo = context.engine._should_log_info + self._echo = self.connection._echo and \ + context.engine._should_log_debug() self._init_metadata() + def _init_metadata(self): metadata = self._cursor_description() if metadata is None: diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 62e0739f7c..eac90854cb 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -53,17 +53,11 @@ def class_logger(cls, enable=False): logger.setLevel(logging.DEBUG) elif enable == 'info': logger.setLevel(logging.INFO) - cls._should_log_debug = logger.isEnabledFor(logging.DEBUG) - cls._should_log_info = logger.isEnabledFor(logging.INFO) + cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG) + cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO) cls.logger = logger _logged_classes.add(cls) -def _refresh_class_loggers(): - for cls in _logged_classes: - logger = logging.getLogger(cls.__module__ + "." + cls.__name__) - cls._should_log_debug = logger.isEnabledFor(logging.DEBUG) - cls._should_log_info = logger.isEnabledFor(logging.INFO) - def instance_logger(instance, echoflag=None): """create a logger for an instance. 
@@ -93,8 +87,8 @@ def instance_logger(instance, echoflag=None): l.setLevel(logging.WARN) else: l = logging.getLogger(name) - instance._should_log_debug = l.isEnabledFor(logging.DEBUG) - instance._should_log_info = l.isEnabledFor(logging.INFO) + instance._should_log_debug = lambda: l.isEnabledFor(logging.DEBUG) + instance._should_log_info = lambda: l.isEnabledFor(logging.INFO) return l class echo_property(object): diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 21f3278049..ae80a2a4d8 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -458,7 +458,7 @@ class Mapper(object): "key columns for mapped table '%s'" % (self, self.mapped_table.description)) self.primary_key = primary_key - self._log("Identified primary key columns: " + str(primary_key)) + self._log("Identified primary key columns: %s", primary_key) def _configure_properties(self): @@ -523,7 +523,7 @@ class Mapper(object): self._configure_property(key, ConcreteInheritedProperty(), init=init, setparent=True) def _configure_property(self, key, prop, init=True, setparent=True): - self._log("_configure_property(%s, %s)" % (key, prop.__class__.__name__)) + self._log("_configure_property(%s, %s)", key, prop.__class__.__name__) if not isinstance(prop, MapperProperty): # we were passed a Column or a list of Columns; generate a ColumnProperty @@ -727,7 +727,7 @@ class Mapper(object): self._log("_post_configure_properties() started") l = [(key, prop) for key, prop in self._props.iteritems()] for key, prop in l: - self._log("initialize prop " + key) + self._log("initialize prop %s", key) if prop.parent is self and not prop._compile_started: prop.init() @@ -1242,7 +1242,8 @@ class Mapper(object): ret[t] = table_to_mapper[t] return ret - def _save_obj(self, states, uowtransaction, postupdate=False, post_update_cols=None, single=False): + def _save_obj(self, states, uowtransaction, postupdate=False, + post_update_cols=None, single=False): """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects. This is called within the context of a UOWTransaction during a @@ -1256,9 +1257,6 @@ class Mapper(object): mapper which does not inherit from any other mapper. """ - if self._should_log_debug: - self._log_debug("_save_obj() start, " + (single and "non-batched" or "batched")) - # if batch=false, call _save_obj separately for each object if not single and not self.batch: for state in _sort_states(states): @@ -1316,13 +1314,13 @@ class Mapper(object): existing = attributes.instance_state(instance) if not uowtransaction.is_deleted(existing): raise orm_exc.FlushError( - "New instance %s with identity key %s conflicts with persistent instance %s" % + "New instance %s with identity key %s conflicts " + "with persistent instance %s" % (state_str(state), instance_key, state_str(existing))) - if self._should_log_debug: - self._log_debug( - "detected row switch for identity %s. will update %s, remove %s from " - "transaction", instance_key, state_str(state), state_str(existing)) + self._log_debug( + "detected row switch for identity %s. 
will update %s, remove %s from " + "transaction", instance_key, state_str(state), state_str(existing)) # remove the "delete" flag from the existing element uowtransaction.set_row_switch(existing) @@ -1340,10 +1338,6 @@ class Mapper(object): pks = mapper._pks_by_table[table] - if self._should_log_debug: - self._log_debug("_save_obj() table '%s' instance %s identity %s", - table.name, state_str(state), str(instance_key)) - isinsert = not has_identity and not postupdate and state not in row_switches params = {} @@ -1550,9 +1544,6 @@ class Mapper(object): flush operation. """ - if self._should_log_debug: - self._log_debug("_delete_obj() start") - if 'connection_callable' in uowtransaction.mapper_flush_opts: connection_callable = uowtransaction.mapper_flush_opts['connection_callable'] tups = [(state, _state_mapper(state), connection_callable(self, state.obj())) for state in _sort_states(states)] diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index 20a1319f8e..56a03fc7cb 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -1091,13 +1091,12 @@ class RelationProperty(StrategizedProperty): def _post_init(self): - if self._should_log_info: - self.logger.info("%s setup primary join %s", self, self.primaryjoin) - self.logger.info("%s setup secondary join %s", self, self.secondaryjoin) - self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs)) - self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or []))) - self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs)) - self.logger.info("%s relation direction %s", self, self.direction) + self.logger.info("%s setup primary join %s", self, self.primaryjoin) + self.logger.info("%s setup secondary join %s", self, self.secondaryjoin) + self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs)) + self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or []))) + self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs)) + self.logger.info("%s relation direction %s", self, self.direction) if self.uselist is None: self.uselist = self.direction is not MANYTOONE diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 17b1fd8e8c..cbd48303c2 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -267,7 +267,7 @@ class LoadDeferredColumns(object): # narrow the keys down to just those which have no history group = [k for k in toload if k in state.unmodified] - if strategy._should_log_debug: + if strategy._should_log_debug(): strategy.logger.debug( "deferred load %s group %s", (mapperutil.state_attribute_str(state, self.key), @@ -513,7 +513,7 @@ class LoadLazyAttribute(object): if kw.get('passive') is attributes.PASSIVE_NO_FETCH and not strategy.use_get: return attributes.PASSIVE_NO_RESULT - if strategy._should_log_debug: + if strategy._should_log_debug(): strategy.logger.debug("loading %s", mapperutil.state_attribute_str(state, self.key)) diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index d2901a49ff..4a694bc333 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ 
b/lib/sqlalchemy/orm/unitofwork.py @@ -122,19 +122,13 @@ class UOWTransaction(object): else: return history.as_state() - def register_object(self, state, isdelete=False, listonly=False, postupdate=False, post_update_cols=None): + def register_object(self, state, isdelete=False, + listonly=False, postupdate=False, post_update_cols=None): # if object is not in the overall session, do nothing if not self.session._contains_state(state): - if self._should_log_debug: - self.logger.debug("object %s not part of session, not registering for flush", - mapperutil.state_str(state)) return - if self._should_log_debug: - self.logger.debug("register object for flush: %s isdelete=%s listonly=%s postupdate=%s", - mapperutil.state_str(state), isdelete, listonly, postupdate) - mapper = _state_mapper(state) task = self.get_task_by_mapper(mapper) @@ -256,11 +250,10 @@ class UOWTransaction(object): break tasks = self._sort_dependencies() - if self._should_log_info: + if self._should_log_info(): self.logger.info("Task dump:\n%s", self._dump(tasks)) UOWExecutor().execute(self, tasks) - if self._should_log_info: - self.logger.info("Execute Complete") + self.logger.info("Execute Complete") def _dump(self, tasks): from uowdumper import UOWDumper @@ -296,16 +289,14 @@ class UOWTransaction(object): for item, cycles in nodes: task = self.get_task_by_mapper(item) if cycles: - for t in task._sort_circular_dependencies(self, [self.get_task_by_mapper(i) for i in cycles]): + for t in task._sort_circular_dependencies( + self, + [self.get_task_by_mapper(i) for i in cycles] + ): ret.append(t) else: ret.append(task) - if self._should_log_debug: - self.logger.debug("Dependent tuples:\n" + "\n".join( - "(%s->%s)" % (d[0].class_.__name__, d[1].class_.__name__) - for d in self.dependencies)) - self.logger.debug("Dependency sort:\n%s", ret) return ret log.class_logger(UOWTransaction) diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py index 92df7dfeb0..6dbadcb3b1 100644 --- a/lib/sqlalchemy/pool.py +++ b/lib/sqlalchemy/pool.py @@ -207,24 +207,21 @@ class _ConnectionRecord(object): def close(self): if self.connection is not None: - if self.__pool._should_log_debug: - self.__pool.logger.debug("Closing connection %r", self.connection) + self.__pool.logger.debug("Closing connection %r", self.connection) try: self.connection.close() except (SystemExit, KeyboardInterrupt): raise except: - if self.__pool._should_log_debug: - self.__pool.logger.debug("Exception closing connection %r", - self.connection) + self.__pool.logger.debug("Exception closing connection %r", + self.connection) def invalidate(self, e=None): - if self.__pool._should_log_info: - if e is not None: - self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)", - self.connection, e.__class__.__name__, e) - else: - self.__pool.logger.info("Invalidate connection %r", self.connection) + if e is not None: + self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)", + self.connection, e.__class__.__name__, e) + else: + self.__pool.logger.info("Invalidate connection %r", self.connection) self.__close() self.connection = None @@ -236,9 +233,8 @@ class _ConnectionRecord(object): for l in self.__pool._on_connect: l.connect(self.connection, self) elif (self.__pool._recycle > -1 and time.time() - self.starttime > self.__pool._recycle): - if self.__pool._should_log_info: - self.__pool.logger.info("Connection %r exceeded timeout; recycling", - self.connection) + self.__pool.logger.info("Connection %r exceeded timeout; recycling", + self.connection) 
self.__close() self.connection = self.__connect() self.info.clear() @@ -249,13 +245,11 @@ class _ConnectionRecord(object): def __close(self): try: - if self.__pool._should_log_debug: - self.__pool.logger.debug("Closing connection %r", self.connection) + self.__pool.logger.debug("Closing connection %r", self.connection) self.connection.close() except Exception, e: - if self.__pool._should_log_debug: - self.__pool.logger.debug("Connection %r threw an error on close: %s" % - (self.connection, e)) + self.__pool.logger.debug("Connection %r threw an error on close: %s", + self.connection, e) if isinstance(e, (SystemExit, KeyboardInterrupt)): raise @@ -263,12 +257,10 @@ class _ConnectionRecord(object): try: self.starttime = time.time() connection = self.__pool._creator() - if self.__pool._should_log_debug: - self.__pool.logger.debug("Created new connection %r", connection) + self.__pool.logger.debug("Created new connection %r", connection) return connection except Exception, e: - if self.__pool._should_log_debug: - self.__pool.logger.debug("Error on connect(): %s", e) + self.__pool.logger.debug("Error on connect(): %s", e) raise @@ -292,8 +284,7 @@ def _finalize_fairy(connection, connection_record, pool, ref=None): raise if connection_record is not None: connection_record.fairy = None - if pool._should_log_debug: - pool.logger.debug("Connection %r being returned to pool", connection) + pool.logger.debug("Connection %r being returned to pool", connection) if pool._on_checkin: for l in pool._on_checkin: l.checkin(connection, connection_record) @@ -318,9 +309,8 @@ class _ConnectionFairy(object): self.connection = None # helps with endless __getattr__ loops later on self._connection_record = None raise - if self._pool._should_log_debug: - self._pool.logger.debug("Connection %r checked out from pool" % - self.connection) + self._pool.logger.debug("Connection %r checked out from pool" % + self.connection) @property def _logger(self): @@ -386,15 +376,13 @@ class _ConnectionFairy(object): l.checkout(self.connection, self._connection_record, self) return self except exc.DisconnectionError, e: - if self._pool._should_log_info: - self._pool.logger.info( - "Disconnection detected on checkout: %s", e) + self._pool.logger.info( + "Disconnection detected on checkout: %s", e) self._connection_record.invalidate(e) self.connection = self._connection_record.get_connection() attempts -= 1 - if self._pool._should_log_info: - self._pool.logger.info("Reconnection attempts exhausted on checkout") + self._pool.logger.info("Reconnection attempts exhausted on checkout") self.invalidate() raise exc.InvalidRequestError("This connection is closed") @@ -452,7 +440,7 @@ class _CursorFairy(object): ex_text = str(e) except TypeError: ex_text = repr(e) - self.__parent._logger.warn("Error closing cursor: " + ex_text) + self.__parent._logger.warn("Error closing cursor: %s", ex_text) if isinstance(e, (SystemExit, KeyboardInterrupt)): raise @@ -673,8 +661,7 @@ class QueuePool(Pool): break self._overflow = 0 - self.size() - if self._should_log_info: - self.logger.info("Pool disposed. " + self.status()) + self.logger.info("Pool disposed. 
%s", self.status()) def status(self): tup = (self.size(), self.checkedin(), self.overflow(), self.checkedout()) diff --git a/lib/sqlalchemy/test/noseplugin.py b/lib/sqlalchemy/test/noseplugin.py index 2ba6f751d5..5e8e21e8fd 100644 --- a/lib/sqlalchemy/test/noseplugin.py +++ b/lib/sqlalchemy/test/noseplugin.py @@ -85,8 +85,6 @@ class NoseSQLAlchemy(Plugin): for fn in post_configure: fn(self.options, file_config) - sqla_log._refresh_class_loggers() - def describeTest(self, test): return "" diff --git a/test/aaa_profiling/test_pool.py b/test/aaa_profiling/test_pool.py index 18ae0c1b38..33631ad4c7 100644 --- a/test/aaa_profiling/test_pool.py +++ b/test/aaa_profiling/test_pool.py @@ -18,7 +18,7 @@ class QueuePoolTest(TestBase, AssertsExecutionResults): use_threadlocal=True) - @profiling.function_call_count(54, {'2.4': 36, '3.0':57, '3.1':57}) + @profiling.function_call_count(64, {'2.4': 36, '3.0':57, '3.1':57}) def test_first_connect(self): conn = pool.connect() @@ -26,7 +26,7 @@ class QueuePoolTest(TestBase, AssertsExecutionResults): conn = pool.connect() conn.close() - @profiling.function_call_count(29, {'2.4': 21}) + @profiling.function_call_count(32, {'2.4': 21}) def go(): conn2 = pool.connect() return conn2 diff --git a/test/aaa_profiling/test_zoomark.py b/test/aaa_profiling/test_zoomark.py index 706f8e470b..d7ccd2e6df 100644 --- a/test/aaa_profiling/test_zoomark.py +++ b/test/aaa_profiling/test_zoomark.py @@ -343,7 +343,7 @@ class ZooMarkTest(TestBase): def test_profile_4_expressions(self): self.test_baseline_4_expressions() - @profiling.function_call_count(1241, {'2.4': 854}) + @profiling.function_call_count(1311, {'2.4': 854}) def test_profile_5_aggregates(self): self.test_baseline_5_aggregates() diff --git a/test/aaa_profiling/test_zoomark_orm.py b/test/aaa_profiling/test_zoomark_orm.py index 220925b6a3..7029944a25 100644 --- a/test/aaa_profiling/test_zoomark_orm.py +++ b/test/aaa_profiling/test_zoomark_orm.py @@ -309,7 +309,7 @@ class ZooMarkTest(TestBase): def test_profile_4_expressions(self): self.test_baseline_4_expressions() - @profiling.function_call_count(1240) + @profiling.function_call_count(1313) def test_profile_5_aggregates(self): self.test_baseline_5_aggregates() -- 2.47.3