internally. The formerly "pending" objects are now expunged
first. [ticket:1674]
+ - Removed a lot of logging that nobody really cares about;
+ the logging that remains will respond to live changes in the
+ log level. No significant overhead is added. [ticket:1719]
+
- session.merge() will not expire existing scalar attributes
on an existing target if the target has a value for that
attribute, even if the incoming merged instance doesn't have
Note that it is *not* built/installed by default.
See README for installation instructions.
+ - Opened up logging a bit such that isEnabledFor() is called
+ more often, so that changes to the log level for engine/pool
+ will be reflected on next connect. This adds a small
+ amount of method call overhead. It's negligible and will make
+ life a lot easier for all those situations when logging
+ just happens to be configured after create_engine() is called.
+ [ticket:1719]
+
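A minimal sketch of the new behavior (the "sqlalchemy.engine" logger name
is the standard one; the snippet itself is illustrative and not part of
the patch):

    import logging
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")

    # Configured *after* create_engine(). Previously the echo decision
    # was cached at construction time; now isEnabledFor() is consulted
    # again on the next connect, so this takes effect immediately.
    logging.basicConfig()
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)

    conn = engine.connect()
    conn.execute("select 1")    # statement now echoed at INFO
    conn.close()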
- The assert_unicode flag is deprecated. SQLAlchemy will raise
a warning in all cases where it is asked to encode a non-unicode
Python string, as well as when a Unicode or UnicodeType type
sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % (
self.compiled.preparer.format_table(table))
- if self.connection.engine._should_log_info:
- self.connection.engine.logger.info(sql)
rs = self.cursor.execute(sql)
id = rs.fetchone()[0]
- if self.connection.engine._should_log_debug:
- self.connection.engine.logger.debug([id])
if not self._last_inserted_ids:
# This shouldn't ever be > 1? Right?
self._last_inserted_ids = \
self.__savepoint_seq = 0
self.__branch = _branch
self.__invalid = False
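+ # cache the engine's INFO-level echo decision once per Connection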
+ self._echo = self.engine._should_log_info()
if _execution_options:
self._execution_options = self._execution_options.union(_execution_options)
return self.__transaction is not None
def _begin_impl(self):
- if self.engine._should_log_info:
+ if self._echo:
self.engine.logger.info("BEGIN")
try:
self.engine.dialect.do_begin(self.connection)
def _rollback_impl(self):
# use getattr() for is_valid to support exceptions raised in dialect initializer,
# where we do not yet have the pool wrappers plugged in
- if not self.closed and not self.invalidated and getattr(self.__connection, 'is_valid', False):
- if self.engine._should_log_info:
+ if not self.closed and not self.invalidated and \
+ getattr(self.__connection, 'is_valid', False):
+ if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
self.__transaction = None
def _commit_impl(self):
- if self.engine._should_log_info:
+ if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
raise
def _cursor_execute(self, cursor, statement, parameters, context=None):
- if self.engine._should_log_info:
+ if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
raise
def _cursor_executemany(self, cursor, statement, parameters, context=None):
- if self.engine._should_log_info:
+ if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
self.closed = False
self.cursor = context.cursor
self.connection = context.root_connection
- self._echo = context.engine._should_log_info
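+ # echo result rows only if the Connection echoes and DEBUG is enabled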
+ self._echo = self.connection._echo and \
+ context.engine._should_log_debug()
self._init_metadata()
+
def _init_metadata(self):
metadata = self._cursor_description()
if metadata is None:
logger.setLevel(logging.DEBUG)
elif enable == 'info':
logger.setLevel(logging.INFO)
- cls._should_log_debug = logger.isEnabledFor(logging.DEBUG)
- cls._should_log_info = logger.isEnabledFor(logging.INFO)
+ cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG)
+ cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO)
cls.logger = logger
_logged_classes.add(cls)
-def _refresh_class_loggers():
- for cls in _logged_classes:
- logger = logging.getLogger(cls.__module__ + "." + cls.__name__)
- cls._should_log_debug = logger.isEnabledFor(logging.DEBUG)
- cls._should_log_info = logger.isEnabledFor(logging.INFO)
-
def instance_logger(instance, echoflag=None):
"""create a logger for an instance.
l.setLevel(logging.WARN)
else:
l = logging.getLogger(name)
- instance._should_log_debug = l.isEnabledFor(logging.DEBUG)
- instance._should_log_info = l.isEnabledFor(logging.INFO)
+ instance._should_log_debug = lambda: l.isEnabledFor(logging.DEBUG)
+ instance._should_log_info = lambda: l.isEnabledFor(logging.INFO)
return l
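The class-level and instance-level flags are invoked identically even
though one lambda takes self and the other takes nothing; a small
standalone illustration of the descriptor mechanics (hypothetical names,
not part of the patch):

    import logging

    logger = logging.getLogger("example")

    class Logged(object):
        # class attribute: looked up through the instance, it becomes a
        # bound method and receives the instance as self
        _should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG)

    obj = Logged()
    obj._should_log_debug()     # bound method call

    # instance attribute (as set by instance_logger): a plain function
    # that shadows the class version and takes no arguments
    obj._should_log_debug = lambda: logger.isEnabledFor(logging.DEBUG)
    obj._should_log_debug()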
class echo_property(object):
"key columns for mapped table '%s'" % (self, self.mapped_table.description))
self.primary_key = primary_key
- self._log("Identified primary key columns: " + str(primary_key))
+ self._log("Identified primary key columns: %s", primary_key)
def _configure_properties(self):
self._configure_property(key, ConcreteInheritedProperty(), init=init, setparent=True)
def _configure_property(self, key, prop, init=True, setparent=True):
- self._log("_configure_property(%s, %s)" % (key, prop.__class__.__name__))
+ self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
# we were passed a Column or a list of Columns; generate a ColumnProperty
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.iteritems()]
for key, prop in l:
- self._log("initialize prop " + key)
+ self._log("initialize prop %s", key)
if prop.parent is self and not prop._compile_started:
prop.init()
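The switch from string concatenation to separate format arguments means
the "%"-interpolation is deferred until the logging framework has checked
the level; a quick illustration (standalone, not part of the patch):

    import logging

    log = logging.getLogger("example")
    key = "addresses"

    log.debug("initialize prop %s", key)      # no formatting if DEBUG is off
    log.debug("initialize prop " + str(key))  # concatenates regardless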
ret[t] = table_to_mapper[t]
return ret
- def _save_obj(self, states, uowtransaction, postupdate=False, post_update_cols=None, single=False):
+ def _save_obj(self, states, uowtransaction, postupdate=False,
+ post_update_cols=None, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
mapper which does not inherit from any other mapper.
"""
- if self._should_log_debug:
- self._log_debug("_save_obj() start, " + (single and "non-batched" or "batched"))
-
# if batch=false, call _save_obj separately for each object
if not single and not self.batch:
for state in _sort_states(states):
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
- "New instance %s with identity key %s conflicts with persistent instance %s" %
+ "New instance %s with identity key %s conflicts "
+ "with persistent instance %s" %
(state_str(state), instance_key, state_str(existing)))
- if self._should_log_debug:
- self._log_debug(
- "detected row switch for identity %s. will update %s, remove %s from "
- "transaction", instance_key, state_str(state), state_str(existing))
+ self._log_debug(
+ "detected row switch for identity %s. will update %s, remove %s from "
+ "transaction", instance_key, state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.set_row_switch(existing)
pks = mapper._pks_by_table[table]
- if self._should_log_debug:
- self._log_debug("_save_obj() table '%s' instance %s identity %s",
- table.name, state_str(state), str(instance_key))
-
isinsert = not has_identity and not postupdate and state not in row_switches
params = {}
flush operation.
"""
- if self._should_log_debug:
- self._log_debug("_delete_obj() start")
-
if 'connection_callable' in uowtransaction.mapper_flush_opts:
connection_callable = uowtransaction.mapper_flush_opts['connection_callable']
tups = [(state, _state_mapper(state), connection_callable(self, state.obj())) for state in _sort_states(states)]
def _post_init(self):
- if self._should_log_info:
- self.logger.info("%s setup primary join %s", self, self.primaryjoin)
- self.logger.info("%s setup secondary join %s", self, self.secondaryjoin)
- self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs))
- self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or [])))
- self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs))
- self.logger.info("%s relation direction %s", self, self.direction)
+ self.logger.info("%s setup primary join %s", self, self.primaryjoin)
+ self.logger.info("%s setup secondary join %s", self, self.secondaryjoin)
+ self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs))
+ self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or [])))
+ self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs))
+ self.logger.info("%s relation direction %s", self, self.direction)
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
- if strategy._should_log_debug:
+ if strategy._should_log_debug():
strategy.logger.debug(
"deferred load %s group %s",
(mapperutil.state_attribute_str(state, self.key),
if kw.get('passive') is attributes.PASSIVE_NO_FETCH and not strategy.use_get:
return attributes.PASSIVE_NO_RESULT
- if strategy._should_log_debug:
+ if strategy._should_log_debug():
strategy.logger.debug("loading %s",
mapperutil.state_attribute_str(state, self.key))
else:
return history.as_state()
- def register_object(self, state, isdelete=False, listonly=False, postupdate=False, post_update_cols=None):
+ def register_object(self, state, isdelete=False,
+ listonly=False, postupdate=False, post_update_cols=None):
# if object is not in the overall session, do nothing
if not self.session._contains_state(state):
- if self._should_log_debug:
- self.logger.debug("object %s not part of session, not registering for flush",
- mapperutil.state_str(state))
return
- if self._should_log_debug:
- self.logger.debug("register object for flush: %s isdelete=%s listonly=%s postupdate=%s",
- mapperutil.state_str(state), isdelete, listonly, postupdate)
-
mapper = _state_mapper(state)
task = self.get_task_by_mapper(mapper)
break
tasks = self._sort_dependencies()
- if self._should_log_info:
+ if self._should_log_info():
self.logger.info("Task dump:\n%s", self._dump(tasks))
UOWExecutor().execute(self, tasks)
- if self._should_log_info:
- self.logger.info("Execute Complete")
+ self.logger.info("Execute Complete")
def _dump(self, tasks):
from uowdumper import UOWDumper
for item, cycles in nodes:
task = self.get_task_by_mapper(item)
if cycles:
- for t in task._sort_circular_dependencies(self, [self.get_task_by_mapper(i) for i in cycles]):
+ for t in task._sort_circular_dependencies(
+ self,
+ [self.get_task_by_mapper(i) for i in cycles]
+ ):
ret.append(t)
else:
ret.append(task)
- if self._should_log_debug:
- self.logger.debug("Dependent tuples:\n" + "\n".join(
- "(%s->%s)" % (d[0].class_.__name__, d[1].class_.__name__)
- for d in self.dependencies))
- self.logger.debug("Dependency sort:\n%s", ret)
return ret
log.class_logger(UOWTransaction)
def close(self):
if self.connection is not None:
- if self.__pool._should_log_debug:
- self.__pool.logger.debug("Closing connection %r", self.connection)
+ self.__pool.logger.debug("Closing connection %r", self.connection)
try:
self.connection.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
- if self.__pool._should_log_debug:
- self.__pool.logger.debug("Exception closing connection %r",
- self.connection)
+ self.__pool.logger.debug("Exception closing connection %r",
+ self.connection)
def invalidate(self, e=None):
- if self.__pool._should_log_info:
- if e is not None:
- self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)",
- self.connection, e.__class__.__name__, e)
- else:
- self.__pool.logger.info("Invalidate connection %r", self.connection)
+ if e is not None:
+ self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)",
+ self.connection, e.__class__.__name__, e)
+ else:
+ self.__pool.logger.info("Invalidate connection %r", self.connection)
self.__close()
self.connection = None
for l in self.__pool._on_connect:
l.connect(self.connection, self)
elif (self.__pool._recycle > -1 and time.time() - self.starttime > self.__pool._recycle):
- if self.__pool._should_log_info:
- self.__pool.logger.info("Connection %r exceeded timeout; recycling",
- self.connection)
+ self.__pool.logger.info("Connection %r exceeded timeout; recycling",
+ self.connection)
self.__close()
self.connection = self.__connect()
self.info.clear()
def __close(self):
try:
- if self.__pool._should_log_debug:
- self.__pool.logger.debug("Closing connection %r", self.connection)
+ self.__pool.logger.debug("Closing connection %r", self.connection)
self.connection.close()
except Exception, e:
- if self.__pool._should_log_debug:
- self.__pool.logger.debug("Connection %r threw an error on close: %s" %
- (self.connection, e))
+ self.__pool.logger.debug("Connection %r threw an error on close: %s",
+ self.connection, e)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
try:
self.starttime = time.time()
connection = self.__pool._creator()
- if self.__pool._should_log_debug:
- self.__pool.logger.debug("Created new connection %r", connection)
+ self.__pool.logger.debug("Created new connection %r", connection)
return connection
except Exception, e:
- if self.__pool._should_log_debug:
- self.__pool.logger.debug("Error on connect(): %s", e)
+ self.__pool.logger.debug("Error on connect(): %s", e)
raise
raise
if connection_record is not None:
connection_record.fairy = None
- if pool._should_log_debug:
- pool.logger.debug("Connection %r being returned to pool", connection)
+ pool.logger.debug("Connection %r being returned to pool", connection)
if pool._on_checkin:
for l in pool._on_checkin:
l.checkin(connection, connection_record)
self.connection = None # helps with endless __getattr__ loops later on
self._connection_record = None
raise
- if self._pool._should_log_debug:
- self._pool.logger.debug("Connection %r checked out from pool" %
- self.connection)
+ self._pool.logger.debug("Connection %r checked out from pool" %
+ self.connection)
@property
def _logger(self):
l.checkout(self.connection, self._connection_record, self)
return self
except exc.DisconnectionError, e:
- if self._pool._should_log_info:
- self._pool.logger.info(
- "Disconnection detected on checkout: %s", e)
+ self._pool.logger.info(
+ "Disconnection detected on checkout: %s", e)
self._connection_record.invalidate(e)
self.connection = self._connection_record.get_connection()
attempts -= 1
- if self._pool._should_log_info:
- self._pool.logger.info("Reconnection attempts exhausted on checkout")
+ self._pool.logger.info("Reconnection attempts exhausted on checkout")
self.invalidate()
raise exc.InvalidRequestError("This connection is closed")
ex_text = str(e)
except TypeError:
ex_text = repr(e)
- self.__parent._logger.warn("Error closing cursor: " + ex_text)
+ self.__parent._logger.warn("Error closing cursor: %s", ex_text)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
break
self._overflow = 0 - self.size()
- if self._should_log_info:
- self.logger.info("Pool disposed. " + self.status())
+ self.logger.info("Pool disposed. %s", self.status())
def status(self):
tup = (self.size(), self.checkedin(), self.overflow(), self.checkedout())
for fn in post_configure:
fn(self.options, file_config)
- sqla_log._refresh_class_loggers()
-
def describeTest(self, test):
return ""
use_threadlocal=True)
- @profiling.function_call_count(54, {'2.4': 36, '3.0':57, '3.1':57})
+ @profiling.function_call_count(64, {'2.4': 36, '3.0':57, '3.1':57})
def test_first_connect(self):
conn = pool.connect()
conn = pool.connect()
conn.close()
- @profiling.function_call_count(29, {'2.4': 21})
+ @profiling.function_call_count(32, {'2.4': 21})
def go():
conn2 = pool.connect()
return conn2
def test_profile_4_expressions(self):
self.test_baseline_4_expressions()
- @profiling.function_call_count(1241, {'2.4': 854})
+ @profiling.function_call_count(1311, {'2.4': 854})
def test_profile_5_aggregates(self):
self.test_baseline_5_aggregates()
def test_profile_4_expressions(self):
self.test_baseline_4_expressions()
- @profiling.function_call_count(1240)
+ @profiling.function_call_count(1313)
def test_profile_5_aggregates(self):
self.test_baseline_5_aggregates()