return self.__engine.dialect.create_execution_context(connection=self, **kwargs)
def __execute_raw(self, context):
- if self.__engine._should_log:
+ if self.__engine._should_log_info:
self.__engine.logger.info(context.statement)
self.__engine.logger.info(repr(context.parameters))
if context.parameters is not None and isinstance(context.parameters, list) and len(context.parameters) > 0 and isinstance(context.parameters[0], (list, tuple, dict)):
self.dialect=dialect
self.echo = echo
self.engine = self
- self.logger = logging.instance_logger(self)
- self._should_log = logging.is_info_enabled(self.logger)
+ self.logger = logging.instance_logger(self, echoflag=echo)
name = property(lambda s:sys.modules[s.dialect.__module__].descriptor()['name'], doc="String name of the [sqlalchemy.engine#Dialect] in use by this ``Engine``.")
- echo = logging.echo_property()
def __repr__(self):
    """Identify this Engine by its connection URL, e.g. ``Engine(sqlite://...)``."""
    return 'Engine(%s)' % str(self.url)
self.dialect = context.dialect
self.closed = False
self.cursor = context.cursor
- self.__echo = context.engine._should_log
+ self.__echo = context.engine._should_log_info
self._process_row = self._row_processor()
if context.is_select():
self._init_metadata()
# also speeds performance as logger initialization is apparently slow
return instance.__class__.__module__ + "." + instance.__class__.__name__ + ".0x.." + hex(id(instance))[-2:]
-def instance_logger(instance):
- return logging.getLogger(_get_instance_name(instance))
-
def class_logger(cls):
    """Return the stdlib logger named after *cls*'s fully-qualified name."""
    qualified = "%s.%s" % (cls.__module__, cls.__name__)
    return logging.getLogger(qualified)
def is_info_enabled(logger):
    """Return True when *logger* would currently emit INFO-level records."""
    target_level = logging.INFO
    return logger.isEnabledFor(target_level)
-class echo_property(object):
- level_map={logging.DEBUG : "debug", logging.INFO:True}
-
- __doc__ = """when ``True``, enable log output for this element.
-
- This has the effect of setting the Python logging level for the
- namespace of this element's class and object reference. A value
- of boolean ``True`` indicates that the loglevel ``logging.INFO`` will be
- set for the logger, whereas the string value ``debug`` will set the loglevel
- to ``logging.DEBUG``.
- """
-
- def __get__(self, instance, owner):
- if instance is None:
- return self
- level = logging.getLogger(_get_instance_name(instance)).getEffectiveLevel()
- return echo_property.level_map.get(level, False)
-
- def __set__(self, instance, value):
- if value:
- default_logging(_get_instance_name(instance))
- logging.getLogger(_get_instance_name(instance)).setLevel(value == 'debug' and logging.DEBUG or logging.INFO)
- else:
- logging.getLogger(_get_instance_name(instance)).setLevel(logging.NOTSET)
def instance_logger(instance, echoflag=None):
    """Create a logger for *instance* and cache its enabled-state flags.

    When ``echoflag`` is true, ``default_logging()`` is invoked for the
    instance's logger name and the logger's level is forced: the string
    ``'debug'`` selects ``logging.DEBUG``, any other true value selects
    ``logging.INFO``.  Regardless of ``echoflag``, the booleans
    ``_should_log_debug`` and ``_should_log_info`` are stored on
    *instance* so hot code paths can test a plain attribute instead of
    calling ``isEnabledFor()`` per statement.
    """
    # compute the dotted logger name and fetch the logger exactly once;
    # the original recomputed both inside the echo branch
    name = _get_instance_name(instance)
    l = logging.getLogger(name)
    if echoflag:
        default_logging(name)
        # old-style conditional kept for py2.3/2.4 compatibility:
        # 'debug' -> DEBUG, any other truthy value -> INFO
        l.setLevel(echoflag == 'debug' and logging.DEBUG or logging.INFO)
    instance._should_log_debug = l.isEnabledFor(logging.DEBUG)
    instance._should_log_info = l.isEnabledFor(logging.INFO)
    return l
those changes will not be persisted.
"""
- self.uow = unitofwork.UnitOfWork(weak_identity_map=weak_identity_map)
+ self.echo_uow = echo_uow
+ self.uow = unitofwork.UnitOfWork(self, weak_identity_map=weak_identity_map)
self.identity_map = self.uow.identity_map
self.bind = bind
self.__binds = {}
- self.echo_uow = echo_uow
self.weak_identity_map = weak_identity_map
self.transaction = None
self.hash_key = id(self)
self.begin()
_sessions[self.hash_key] = self
- def _get_echo_uow(self):
- return self.uow.echo
-
- def _set_echo_uow(self, value):
- self.uow.echo = value
- echo_uow = property(_get_echo_uow,_set_echo_uow)
-
def begin(self, **kwargs):
"""Begin a transaction on this Session."""
for instance in self:
self._unattach(instance)
- echo = self.uow.echo
- self.uow = unitofwork.UnitOfWork(weak_identity_map=self.weak_identity_map)
+ self.uow = unitofwork.UnitOfWork(self, weak_identity_map=self.weak_identity_map)
self.identity_map = self.uow.identity_map
- self.uow.echo = echo
def bind_mapper(self, mapper, bind, entity_name=None):
"""Bind the given `mapper` or `class` to the given ``Engine`` or ``Connection``.
operation.
"""
def __init__(self, session, weak_identity_map=False):
    """Track new/dirty/deleted objects for *session*'s flush process.

    session
      the owning Session; its ``echo_uow`` flag is passed to
      ``instance_logger`` as ``echoflag`` to configure log output.

    weak_identity_map
      when True, the identity map holds weak references so that
      otherwise-unreferenced instances may be garbage collected.
    """
    if weak_identity_map:
        self.identity_map = weakref.WeakValueDictionary()
    else:
        self.identity_map = {}
    self.new = util.Set()  # OrderedSet()
    self.deleted = util.Set()
    # instance_logger also caches _should_log_info/_should_log_debug
    # on this UnitOfWork
    self.logger = logging.instance_logger(self, echoflag=session.echo_uow)
def _remove_deleted(self, obj):
if hasattr(obj, "_instance_key"):
# and organize a hierarchical dependency structure. it also handles
# communication with the mappers and relationships to fire off SQL
# and synchronize attributes between related objects.
- echo = logging.is_info_enabled(self.logger)
flush_context = UOWTransaction(self, session)
# information.
self.attributes = {}
- self.logger = logging.instance_logger(self)
- self.echo = uow.echo
+ self.logger = logging.instance_logger(self, echoflag=session.echo_uow)
- echo = logging.echo_property()
-
def register_object(self, obj, isdelete = False, listonly = False, postupdate=False, post_update_cols=None, **kwargs):
"""Add an object to this ``UOWTransaction`` to be updated in the database.
# if object is not in the overall session, do nothing
if not self.uow._is_valid(obj):
- if logging.is_debug_enabled(self.logger):
+ if self._should_log_debug:
self.logger.debug("object %s not part of session, not registering for flush" % (mapperutil.instance_str(obj)))
return
- if logging.is_debug_enabled(self.logger):
+ if self._should_log_debug:
self.logger.debug("register object for flush: %s isdelete=%s listonly=%s postupdate=%s" % (mapperutil.instance_str(obj), isdelete, listonly, postupdate))
mapper = object_mapper(obj)
break
head = self._sort_dependencies()
- if self.echo:
+ if self._should_log_info:
if head is None:
self.logger.info("Task dump: None")
else:
self.logger.info("Task dump:\n" + head.dump())
if head is not None:
UOWExecutor().execute(self, head)
- self.logger.info("Execute Complete")
+ if self._should_log_info:
+ self.logger.info("Execute Complete")
def post_exec(self):
"""mark processed objects as clean / deleted after a successful flush().
# get list of base mappers
mappers = [t.mapper for t in self.tasks.values() if t.base_task is t]
head = topological.QueueDependencySorter(self.dependencies, mappers).sort(allow_all_cycles=True)
- if logging.is_debug_enabled(self.logger):
+ if self._should_log_debug:
self.logger.debug("Dependent tuples:\n" + "\n".join(["(%s->%s)" % (d[0].class_.__name__, d[1].class_.__name__) for d in self.dependencies]))
self.logger.debug("Dependency sort:\n"+ str(head))
task = sort_hier(head)
def __init__(self, creator, recycle=-1, echo=None, use_threadlocal=True,
listeners=None):
- self.logger = logging.instance_logger(self)
+ self.logger = logging.instance_logger(self, echoflag=echo)
self._threadconns = {}
self._creator = creator
self._recycle = recycle
self._on_connect = []
self._on_checkout = []
self._on_checkin = []
- self._should_log = logging.is_info_enabled(self.logger)
if listeners:
for l in listeners:
self.add_listener(l)
- echo = logging.echo_property()
def unique_connection(self):
    """Return a new ``_ConnectionFairy`` for this pool, checked out immediately."""
    return _ConnectionFairy(self).checkout()
def close(self):
    """Close the underlying DBAPI connection, if one is present.

    The close is logged when the owning pool has INFO logging enabled
    (via the cached ``_should_log_info`` flag).
    """
    if self.connection is not None:
        if self.__pool._should_log_info:
            self.__pool.log("Closing connection %s" % repr(self.connection))
        self.connection.close()
def invalidate(self, e=None):
- if self.__pool._should_log:
+ if self.__pool._should_log_info:
if e is not None:
self.__pool.log("Invalidate connection %s (reason: %s:%s)" % (repr(self.connection), e.__class__.__name__, str(e)))
else:
for l in self.__pool._on_connect:
l.connect(self.connection, self)
elif (self.__pool._recycle > -1 and time.time() - self.starttime > self.__pool._recycle):
- if self.__pool._should_log:
+ if self.__pool._should_log_info:
self.__pool.log("Connection %s exceeded timeout; recycling" % repr(self.connection))
self.__close()
self.connection = self.__connect()
def __close(self):
try:
- if self.__pool._should_log:
+ if self.__pool._should_log_info:
self.__pool.log("Closing connection %s" % (repr(self.connection)))
self.connection.close()
except Exception, e:
- if self.__pool._should_log:
+ if self.__pool._should_log_info:
self.__pool.log("Connection %s threw an error on close: %s" % (repr(self.connection), str(e)))
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
try:
self.starttime = time.time()
connection = self.__pool._creator()
- if self.__pool._should_log:
+ if self.__pool._should_log_info:
self.__pool.log("Created new connection %s" % repr(connection))
return connection
except Exception, e:
- if self.__pool._should_log:
+ if self.__pool._should_log_info:
self.__pool.log("Error on connect(): %s" % (str(e)))
raise
raise
if connection_record is not None:
connection_record.backref = None
- if pool._should_log:
+ if pool._should_log_info:
pool.log("Connection %s being returned to pool" % repr(connection))
if pool._on_checkin:
for l in pool._on_checkin:
self.connection = None # helps with endless __getattr__ loops later on
self._connection_record = None
raise
- if self._pool._should_log:
+ if self._pool._should_log_info:
self._pool.log("Connection %s checked out from pool" % repr(self.connection))
_logger = property(lambda self: self._pool.logger)
l.checkout(self.connection, self._connection_record, self)
return self
except exceptions.DisconnectionError, e:
- if self._pool._should_log:
+ if self._pool._should_log_info:
self._pool.log(
"Disconnection detected on checkout: %s" % (str(e)))
self._connection_record.invalidate(e)
self.connection = self._connection_record.get_connection()
attempts -= 1
- if self._pool._should_log:
+ if self._pool._should_log_info:
self._pool.log("Reconnection attempts exhausted on checkout")
self.invalidate()
raise exceptions.InvalidRequestError("This connection is closed")
def recreate(self):
    """Return a new SingletonThreadPool carrying this pool's configuration.

    ``echo`` is propagated from the cached ``_should_log_info`` flag,
    which reflects whether echo was enabled on this pool.
    """
    self.log("Pool recreating")
    return SingletonThreadPool(self._creator, pool_size=self.size,
                               recycle=self._recycle,
                               echo=self._should_log_info,
                               use_threadlocal=self._use_threadlocal)
def dispose(self):
"""Dispose of this pool.
def recreate(self):
    """Return a new QueuePool carrying this pool's configuration.

    ``echo`` is propagated from the cached ``_should_log_info`` flag,
    which reflects whether echo was enabled on this pool.
    """
    self.log("Pool recreating")
    return QueuePool(self._creator, pool_size=self._pool.maxsize,
                     max_overflow=self._max_overflow,
                     timeout=self._timeout, recycle=self._recycle,
                     echo=self._should_log_info,
                     use_threadlocal=self._use_threadlocal)
def do_return_conn(self, conn):
try:
break
self._overflow = 0 - self.size()
- if self._should_log:
+ if self._should_log_info:
self.log("Pool disposed. " + self.status())
def status(self):