git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
-removed echo_property() function, moved logging checks to
authorMike Bayer <mike_mp@zzzcomputing.com>
Tue, 21 Aug 2007 23:06:12 +0000 (23:06 +0000)
committerMike Bayer <mike_mp@zzzcomputing.com>
Tue, 21 Aug 2007 23:06:12 +0000 (23:06 +0000)
static variables

lib/sqlalchemy/engine/base.py
lib/sqlalchemy/logging.py
lib/sqlalchemy/orm/session.py
lib/sqlalchemy/orm/unitofwork.py
lib/sqlalchemy/pool.py

index 40999f3ef755eb874a9cff0752191d9922f6bd99..958cb74801b9a5004f0b4fd7010affc022e6830d 100644 (file)
@@ -828,7 +828,7 @@ class Connection(Connectable):
         return self.__engine.dialect.create_execution_context(connection=self, **kwargs)
 
     def __execute_raw(self, context):
-        if self.__engine._should_log:
+        if self.__engine._should_log_info:
             self.__engine.logger.info(context.statement)
             self.__engine.logger.info(repr(context.parameters))
         if context.parameters is not None and isinstance(context.parameters, list) and len(context.parameters) > 0 and isinstance(context.parameters[0], (list, tuple, dict)):
@@ -1007,11 +1007,9 @@ class Engine(Connectable):
         self.dialect=dialect
         self.echo = echo
         self.engine = self
-        self.logger = logging.instance_logger(self)
-        self._should_log = logging.is_info_enabled(self.logger)
+        self.logger = logging.instance_logger(self, echoflag=echo)
 
     name = property(lambda s:sys.modules[s.dialect.__module__].descriptor()['name'], doc="String name of the [sqlalchemy.engine#Dialect] in use by this ``Engine``.")
-    echo = logging.echo_property()
     
     def __repr__(self):
         return 'Engine(%s)' % str(self.url)
@@ -1207,7 +1205,7 @@ class ResultProxy(object):
         self.dialect = context.dialect
         self.closed = False
         self.cursor = context.cursor
-        self.__echo = context.engine._should_log
+        self.__echo = context.engine._should_log_info
         self._process_row = self._row_processor()
         if context.is_select():
             self._init_metadata()
index 17f89c7d271cb05de1592021d43929c7e4c591dc..2dfd1e50adabd933f75e7258170afaf5c7bf66c1 100644 (file)
@@ -56,9 +56,6 @@ def _get_instance_name(instance):
     # also speeds performance as logger initialization is apparently slow
     return instance.__class__.__module__ + "." + instance.__class__.__name__ + ".0x.." + hex(id(instance))[-2:]
 
-def instance_logger(instance):
-    return logging.getLogger(_get_instance_name(instance))
-
 def class_logger(cls):
     return logging.getLogger(cls.__module__ + "." + cls.__name__)
 
@@ -68,27 +65,14 @@ def is_debug_enabled(logger):
 def is_info_enabled(logger):
     return logger.isEnabledFor(logging.INFO)
 
-class echo_property(object):
-    level_map={logging.DEBUG : "debug", logging.INFO:True}
-    
-    __doc__ = """when ``True``, enable log output for this element.
-    
-    This has the effect of setting the Python logging level for the 
-    namespace of this element's class and object reference.  A value
-    of boolean ``True`` indicates that the loglevel ``logging.INFO`` will be 
-    set for the logger, whereas the string value ``debug`` will set the loglevel
-    to ``logging.DEBUG``.
-    """
-    
-    def __get__(self, instance, owner):
-        if instance is None:
-            return self
-        level = logging.getLogger(_get_instance_name(instance)).getEffectiveLevel()
-        return echo_property.level_map.get(level, False)
-        
-    def __set__(self, instance, value):
-        if value:
-            default_logging(_get_instance_name(instance))
-            logging.getLogger(_get_instance_name(instance)).setLevel(value == 'debug' and logging.DEBUG or logging.INFO)
-        else:
-            logging.getLogger(_get_instance_name(instance)).setLevel(logging.NOTSET)
+def instance_logger(instance, echoflag=None):
+    if echoflag:
+        default_logging(_get_instance_name(instance))
+        l = logging.getLogger(_get_instance_name(instance))
+        l.setLevel(echoflag == 'debug' and logging.DEBUG or logging.INFO)
+    else:
+        l = logging.getLogger(_get_instance_name(instance))
+    instance._should_log_debug = l.isEnabledFor(logging.DEBUG)
+    instance._should_log_info = l.isEnabledFor(logging.INFO)
+    return l
+    
\ No newline at end of file
index 5a91f8fbfe4ae0f29c0972441beeb94558b4c336..c6922b928bb3e07bf5d0d0dc6af946059338cab8 100644 (file)
@@ -390,12 +390,12 @@ class Session(object):
                 those changes will not be persisted.
             
         """
-        self.uow = unitofwork.UnitOfWork(weak_identity_map=weak_identity_map)
+        self.echo_uow = echo_uow
+        self.uow = unitofwork.UnitOfWork(self, weak_identity_map=weak_identity_map)
         self.identity_map = self.uow.identity_map
 
         self.bind = bind
         self.__binds = {}
-        self.echo_uow = echo_uow
         self.weak_identity_map = weak_identity_map
         self.transaction = None
         self.hash_key = id(self)
@@ -419,13 +419,6 @@ class Session(object):
             self.begin()
         _sessions[self.hash_key] = self
             
-    def _get_echo_uow(self):
-        return self.uow.echo
-
-    def _set_echo_uow(self, value):
-        self.uow.echo = value
-    echo_uow = property(_get_echo_uow,_set_echo_uow)
-    
     def begin(self, **kwargs):
         """Begin a transaction on this Session."""
 
@@ -573,10 +566,8 @@ class Session(object):
 
         for instance in self:
             self._unattach(instance)
-        echo = self.uow.echo
-        self.uow = unitofwork.UnitOfWork(weak_identity_map=self.weak_identity_map)
+        self.uow = unitofwork.UnitOfWork(self, weak_identity_map=self.weak_identity_map)
         self.identity_map = self.uow.identity_map
-        self.uow.echo = echo
 
     def bind_mapper(self, mapper, bind, entity_name=None):
         """Bind the given `mapper` or `class` to the given ``Engine`` or ``Connection``.
index 95ed950e19003554cd40b51d52ed3c3d1ea1def1..2f285cf462bdf7de8f1d205884c5c024e7dc9318 100644 (file)
@@ -87,20 +87,15 @@ class UnitOfWork(object):
     operation.
     """
 
-    def __init__(self, identity_map=None, weak_identity_map=False):
-        if identity_map is not None:
-            self.identity_map = identity_map
+    def __init__(self, session, weak_identity_map=False):
+        if weak_identity_map:
+            self.identity_map = weakref.WeakValueDictionary()
         else:
-            if weak_identity_map:
-                self.identity_map = weakref.WeakValueDictionary()
-            else:
-                self.identity_map = {}
+            self.identity_map = {}
 
         self.new = util.Set() #OrderedSet()
         self.deleted = util.Set()
-        self.logger = logging.instance_logger(self)
-
-    echo = logging.echo_property()
+        self.logger = logging.instance_logger(self, echoflag=session.echo_uow)
 
     def _remove_deleted(self, obj):
         if hasattr(obj, "_instance_key"):
@@ -169,7 +164,6 @@ class UnitOfWork(object):
         # and organize a hierarchical dependency structure.  it also handles
         # communication with the mappers and relationships to fire off SQL
         # and synchronize attributes between related objects.
-        echo = logging.is_info_enabled(self.logger)
 
         flush_context = UOWTransaction(self, session)
 
@@ -263,11 +257,8 @@ class UOWTransaction(object):
         # information. 
         self.attributes = {}
 
-        self.logger = logging.instance_logger(self)
-        self.echo = uow.echo
+        self.logger = logging.instance_logger(self, echoflag=session.echo_uow)
         
-    echo = logging.echo_property()
-
     def register_object(self, obj, isdelete = False, listonly = False, postupdate=False, post_update_cols=None, **kwargs):
         """Add an object to this ``UOWTransaction`` to be updated in the database.
 
@@ -280,11 +271,11 @@ class UOWTransaction(object):
 
         # if object is not in the overall session, do nothing
         if not self.uow._is_valid(obj):
-            if logging.is_debug_enabled(self.logger):
+            if self._should_log_debug:
                 self.logger.debug("object %s not part of session, not registering for flush" % (mapperutil.instance_str(obj)))
             return
 
-        if logging.is_debug_enabled(self.logger):
+        if self._should_log_debug:
             self.logger.debug("register object for flush: %s isdelete=%s listonly=%s postupdate=%s" % (mapperutil.instance_str(obj), isdelete, listonly, postupdate))
 
         mapper = object_mapper(obj)
@@ -414,14 +405,15 @@ class UOWTransaction(object):
                 break
 
         head = self._sort_dependencies()
-        if self.echo:
+        if self._should_log_info:
             if head is None:
                 self.logger.info("Task dump: None")
             else:
                 self.logger.info("Task dump:\n" + head.dump())
         if head is not None:
             UOWExecutor().execute(self, head)
-        self.logger.info("Execute Complete")
+        if self._should_log_info:
+            self.logger.info("Execute Complete")
 
     def post_exec(self):
         """mark processed objects as clean / deleted after a successful flush().
@@ -469,7 +461,7 @@ class UOWTransaction(object):
         # get list of base mappers
         mappers = [t.mapper for t in self.tasks.values() if t.base_task is t]
         head = topological.QueueDependencySorter(self.dependencies, mappers).sort(allow_all_cycles=True)
-        if logging.is_debug_enabled(self.logger):
+        if self._should_log_debug:
             self.logger.debug("Dependent tuples:\n" + "\n".join(["(%s->%s)" % (d[0].class_.__name__, d[1].class_.__name__) for d in self.dependencies]))
             self.logger.debug("Dependency sort:\n"+ str(head))
         task = sort_hier(head)
index 28f7c248d8d1d03e53b259d679bdd874e0c2f271..8802337ea774f0fd9ce8b8c94960cbeab5ca7cfa 100644 (file)
@@ -114,7 +114,7 @@ class Pool(object):
 
     def __init__(self, creator, recycle=-1, echo=None, use_threadlocal=True,
                  listeners=None):
-        self.logger = logging.instance_logger(self)
+        self.logger = logging.instance_logger(self, echoflag=echo)
         self._threadconns = {}
         self._creator = creator
         self._recycle = recycle
@@ -124,12 +124,10 @@ class Pool(object):
         self._on_connect = []
         self._on_checkout = []
         self._on_checkin = []
-        self._should_log = logging.is_info_enabled(self.logger)
         
         if listeners:
             for l in listeners:
                 self.add_listener(l)
-    echo = logging.echo_property()
 
     def unique_connection(self):
         return _ConnectionFairy(self).checkout()
@@ -203,12 +201,12 @@ class _ConnectionRecord(object):
 
     def close(self):
         if self.connection is not None:
-            if self.__pool._should_log:
+            if self.__pool._should_log_info:
                 self.__pool.log("Closing connection %s" % repr(self.connection))
             self.connection.close()
 
     def invalidate(self, e=None):
-        if self.__pool._should_log:
+        if self.__pool._should_log_info:
             if e is not None:
                 self.__pool.log("Invalidate connection %s (reason: %s:%s)" % (repr(self.connection), e.__class__.__name__, str(e)))
             else:
@@ -224,7 +222,7 @@ class _ConnectionRecord(object):
                 for l in self.__pool._on_connect:
                     l.connect(self.connection, self)
         elif (self.__pool._recycle > -1 and time.time() - self.starttime > self.__pool._recycle):
-            if self.__pool._should_log:
+            if self.__pool._should_log_info:
                 self.__pool.log("Connection %s exceeded timeout; recycling" % repr(self.connection))
             self.__close()
             self.connection = self.__connect()
@@ -236,11 +234,11 @@ class _ConnectionRecord(object):
 
     def __close(self):
         try:
-            if self.__pool._should_log:
+            if self.__pool._should_log_info:
                 self.__pool.log("Closing connection %s" % (repr(self.connection)))
             self.connection.close()
         except Exception, e:
-            if self.__pool._should_log:
+            if self.__pool._should_log_info:
                 self.__pool.log("Connection %s threw an error on close: %s" % (repr(self.connection), str(e)))
             if isinstance(e, (SystemExit, KeyboardInterrupt)):
                 raise
@@ -249,11 +247,11 @@ class _ConnectionRecord(object):
         try:
             self.starttime = time.time()
             connection = self.__pool._creator()
-            if self.__pool._should_log:
+            if self.__pool._should_log_info:
                 self.__pool.log("Created new connection %s" % repr(connection))
             return connection
         except Exception, e:
-            if self.__pool._should_log:
+            if self.__pool._should_log_info:
                 self.__pool.log("Error on connect(): %s" % (str(e)))
             raise
             
@@ -273,7 +271,7 @@ def _finalize_fairy(connection, connection_record, pool, ref=None):
                 raise
     if connection_record is not None:
         connection_record.backref = None
-        if pool._should_log:
+        if pool._should_log_info:
             pool.log("Connection %s being returned to pool" % repr(connection))
         if pool._on_checkin:
             for l in pool._on_checkin:
@@ -294,7 +292,7 @@ class _ConnectionFairy(object):
             self.connection = None # helps with endless __getattr__ loops later on
             self._connection_record = None
             raise
-        if self._pool._should_log:
+        if self._pool._should_log_info:
             self._pool.log("Connection %s checked out from pool" % repr(self.connection))
     
     _logger = property(lambda self: self._pool.logger)
@@ -356,14 +354,14 @@ class _ConnectionFairy(object):
                     l.checkout(self.connection, self._connection_record, self)
                 return self
             except exceptions.DisconnectionError, e:
-                if self._pool._should_log:
+                if self._pool._should_log_info:
                     self._pool.log(
                     "Disconnection detected on checkout: %s" % (str(e)))
                 self._connection_record.invalidate(e)
                 self.connection = self._connection_record.get_connection()
                 attempts -= 1
 
-        if self._pool._should_log:
+        if self._pool._should_log_info:
             self._pool.log("Reconnection attempts exhausted on checkout")
         self.invalidate()
         raise exceptions.InvalidRequestError("This connection is closed")
@@ -443,7 +441,7 @@ class SingletonThreadPool(Pool):
 
     def recreate(self):
         self.log("Pool recreating")
-        return SingletonThreadPool(self._creator, pool_size=self.size, recycle=self._recycle, echo=self.echo, use_threadlocal=self._use_threadlocal)
+        return SingletonThreadPool(self._creator, pool_size=self.size, recycle=self._recycle, echo=self._should_log_info, use_threadlocal=self._use_threadlocal)
         
     def dispose(self):
         """Dispose of this pool.
@@ -537,7 +535,7 @@ class QueuePool(Pool):
 
     def recreate(self):
         self.log("Pool recreating")
-        return QueuePool(self._creator, pool_size=self._pool.maxsize, max_overflow=self._max_overflow, timeout=self._timeout, recycle=self._recycle, echo=self.echo, use_threadlocal=self._use_threadlocal)
+        return QueuePool(self._creator, pool_size=self._pool.maxsize, max_overflow=self._max_overflow, timeout=self._timeout, recycle=self._recycle, echo=self._should_log_info, use_threadlocal=self._use_threadlocal)
 
     def do_return_conn(self, conn):
         try:
@@ -588,7 +586,7 @@ class QueuePool(Pool):
                 break
 
         self._overflow = 0 - self.size()
-        if self._should_log:
+        if self._should_log_info:
             self.log("Pool disposed. " + self.status())
 
     def status(self):