added a self-creating dict that takes advantage of 2.5's __missing__, implemented in
ResultProxy, [ticket:582]

author     Mike Bayer <mike_mp@zzzcomputing.com>
           Thu, 31 May 2007 23:23:14 +0000 (23:23 +0000)
committer  Mike Bayer <mike_mp@zzzcomputing.com>
           Thu, 31 May 2007 23:23:14 +0000 (23:23 +0000)

CHANGES
lib/sqlalchemy/engine/base.py
lib/sqlalchemy/util.py
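
The commit message above refers to Python 2.5's dict __missing__ hook: plain
dict lookup calls __missing__ only when the requested key is absent, which lets
a subclass build and store the value on first access. A minimal, hypothetical
sketch of that mechanism (illustration only, not the committed code; the class
name is made up):

    class SelfCreatingDict(dict):
        """Toy example of the Python 2.5 __missing__ hook."""
        def __init__(self, creator):
            self.creator = creator            # builds a value from a key
        def __missing__(self, key):
            # called by dict.__getitem__ only when 'key' is not present
            self[key] = value = self.creator(key)
            return value

    d = SelfCreatingDict(lambda key: key.upper())
    assert d['foo'] == 'FOO'    # computed via __missing__ and stored
    assert 'foo' in d           # later lookups hit the cached entry directly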

diff --git a/CHANGES b/CHANGES
index 32d7103c63459ecc6842549472321891864b3847..19fbbf250236f742aaeedaaf3b876c29225476b6 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,26 +1,28 @@
 0.4.0
 - orm
-    - along with recent speedups to ResultProxy, total number of function calls
-      significantly reduced for large loads.  test/perf/masseagerload.py reports
-      0.4 as having the fewest number of function calls across all SA versions 
-      (0.1, 0.2, and 0.3)
-    - secondary inheritance loading: polymorphic mappers can be constructed *without* 
-      a select_table argument.  inheriting mappers whose tables were not 
-      represented in the initial load will issue a second SQL query immediately,
-      once per instance (i.e. not very efficient for large lists), 
-      in order to load the remaining columns.
-    - secondary inheritance loading can also move its second query into a column-
-      level "deferred" load, via the "polymorphic_fetch" argument, which can be set
-      to 'select' or 'deferred'
-    - added undefer_group() MapperOption, sets a set of "deferred" columns joined by a
-      "group" to load as "undeferred".
+    - along with recent speedups to ResultProxy, total number of
+      function calls significantly reduced for large loads.
+      test/perf/masseagerload.py reports 0.4 as having the fewest number
+      of function calls across all SA versions (0.1, 0.2, and 0.3)
+    - secondary inheritance loading: polymorphic mappers can be
+      constructed *without* a select_table argument. inheriting mappers
+      whose tables were not represented in the initial load will issue a
+      second SQL query immediately, once per instance (i.e. not very
+      efficient for large lists), in order to load the remaining
+      columns.
+    - secondary inheritance loading can also move its second query into
+      a column- level "deferred" load, via the "polymorphic_fetch"
+      argument, which can be set to 'select' or 'deferred'
+    - added undefer_group() MapperOption, sets a set of "deferred"
+      columns joined by a "group" to load as "undeferred".
     
 0.3.XXX
 - engines
-    - added detach() to Connection, allows underlying DBAPI connection to be detached 
-      from its pool, closing on dereference/close() instead of being reused by the pool.
-    - added invalidate() to Connection, immediately invalidates the Connection and its
-      underlying DBAPI connection.
+    - added detach() to Connection, allows underlying DBAPI connection
+      to be detached from its pool, closing on dereference/close()
+      instead of being reused by the pool.
+    - added invalidate() to Connection, immediately invalidates the
+      Connection and its underlying DBAPI connection.
 - sql
     - _Label class overrides compare_self to return its ultimate object.
       meaning, if you say someexpr.label('foo') == 5, it produces
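
(Usage sketch, not part of the commit: the 0.3.XXX engine entries above add
detach() and invalidate() to Connection. Roughly, they would be used as
follows; the database URL is hypothetical.)

    from sqlalchemy import create_engine

    engine = create_engine('sqlite://')   # hypothetical in-memory database

    # detach(): the underlying DBAPI connection leaves the pool for good and is
    # closed on close()/dereference instead of being returned to the pool.
    conn = engine.connect()
    conn.detach()
    conn.close()

    # invalidate(): immediately invalidates the Connection and its underlying
    # DBAPI connection, e.g. after a disconnect error was detected.
    conn2 = engine.connect()
    conn2.invalidate()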
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index ed9a5b228baa8a2f78281aaf24b05914c150045b..e7eb108b537dea7e56ac7d8a647d7d1b3df8ad47 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -869,8 +869,8 @@ class ResultProxy(object):
     def _init_metadata(self):
         if hasattr(self, '_ResultProxy__props'):
             return
-        self.__key_cache = {}
         self.__props = {}
+        self._key_cache = self._create_key_cache()
         self.__keys = []
         metadata = self.cursor.description
 
@@ -894,6 +894,30 @@ class ResultProxy(object):
             if self.__echo:
                 self.context.engine.logger.debug("Col " + repr(tuple([x[0] for x in metadata])))
 
+    def _create_key_cache(self):
+        # local copies to avoid circular ref against 'self'
+        props = self.__props
+        context = self.context
+        def lookup_key(key):
+            """Given a key, which could be a ColumnElement, string, etc.,
+            matches it to the appropriate key we got from the result set's
+            metadata; then cache it locally for quick re-access."""
+
+            if isinstance(key, int) and key in props:
+                rec = props[key]
+            elif isinstance(key, basestring) and key.lower() in props:
+                rec = props[key.lower()]
+            elif isinstance(key, sql.ColumnElement):
+                label = context.column_labels.get(key._label, key.name).lower()
+                if label in props:
+                    rec = props[label]
+
+            if not "rec" in locals():
+                raise exceptions.NoSuchColumnError("Could not locate column in row for column '%s'" % (repr(key)))
+
+            return rec
+        return util.PopulateDict(lookup_key)
+        
     def close(self):
         """Close this ResultProxy, and the underlying DBAPI cursor corresponding to the execution.
 
@@ -909,38 +933,12 @@ class ResultProxy(object):
             self.cursor.close()
             if self.connection.should_close_with_result:
                 self.connection.close()
-            
-    def _convert_key(self, key):
-        """Convert and cache a key.
-
-        Given a key, which could be a ColumnElement, string, etc.,
-        matches it to the appropriate key we got from the result set's
-        metadata; then cache it locally for quick re-access.
-        """
-
-        if key in self.__key_cache:
-            return self.__key_cache[key]
-        else:
-            if isinstance(key, int) and key in self.__props:
-                rec = self.__props[key]
-            elif isinstance(key, basestring) and key.lower() in self.__props:
-                rec = self.__props[key.lower()]
-            elif isinstance(key, sql.ColumnElement):
-                label = self.context.column_labels.get(key._label, key.name).lower()
-                if label in self.__props:
-                    rec = self.__props[label]
-                        
-            if not "rec" in locals():
-                raise exceptions.NoSuchColumnError("Could not locate column in row for column '%s'" % (repr(key)))
-
-            self.__key_cache[key] = rec
-            return rec
     
     keys = property(lambda s:s.__keys)
     
     def _has_key(self, row, key):
         try:
-            self._convert_key(key)
+            self._key_cache[key]
             return True
         except KeyError:
             return False
@@ -994,7 +992,7 @@ class ResultProxy(object):
         return self.context.supports_sane_rowcount()
 
     def _get_col(self, row, key):
-        rec = self._convert_key(key)
+        rec = self._key_cache[key]
         return rec[1].convert_result_value(row[rec[2]], self.dialect)
     
     def _fetchone_impl(self):
@@ -1106,7 +1104,7 @@ class BufferedColumnResultProxy(ResultProxy):
 
     """
     def _get_col(self, row, key):
-        rec = self._convert_key(key)
+        rec = self._key_cache[key]
         return row[rec[2]]
     
     def _process_row(self, row):
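
(Sketch, not part of the commit: the ResultProxy hunks above route every column
lookup through the new _key_cache, whose lookup_key function accepts an integer
position, a string name matched case-insensitively, or a ColumnElement. The
'engine' and 'users' table below are assumed to exist already.)

    result = engine.execute(users.select())
    row = result.fetchone()

    row[0]                    # integer position
    row['user_name']          # string name, lower-cased before matching
    row[users.c.user_name]    # ColumnElement, matched via its label/name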
diff --git a/lib/sqlalchemy/util.py b/lib/sqlalchemy/util.py
index a5a051ddbdd01b8f4f3224a576372ff6c531198a..38f06584fc67ddd3c06c3fd0b533b85f7f761ade 100644
--- a/lib/sqlalchemy/util.py
+++ b/lib/sqlalchemy/util.py
@@ -11,7 +11,7 @@ except ImportError:
     import dummy_threading as threading
 
 import md5
-
+import sys
 import __builtin__
 
 try:
@@ -30,43 +30,30 @@ except:
             i -= 1
         raise StopIteration()
 
-try:
-    from collections import defaultdict
-except:
-    class defaultdict(dict):
-        def __init__(self, default_factory=None, *a, **kw):
-            if (default_factory is not None and
-                not hasattr(default_factory, '__call__')):
-                raise TypeError('first argument must be callable')
-            dict.__init__(self, *a, **kw)
-            self.default_factory = default_factory
+if sys.version_info >= (2, 5):
+    class PopulateDict(dict):
+        """a dict which populates missing values via a creation function.
+        
+        note the creation function takes a key, unlike collections.defaultdict.
+        """
+        
+        def __init__(self, creator):
+            self.creator = creator
+        def __missing__(self, key):
+            self[key] = val = self.creator(key)
+            return val
+else:
+    class PopulateDict(dict):
+        """a dict which populates missing values via a creation function."""
+
+        def __init__(self, creator):
+            self.creator = creator
         def __getitem__(self, key):
             try:
                 return dict.__getitem__(self, key)
             except KeyError:
-                value = self[key] = self.__missing__(key)
+                self[key] = value = self.creator(key)
                 return value
-        def __missing__(self, key):
-            if self.default_factory is None:
-                raise KeyError(key)
-            return self.default_factory()
-        def __reduce__(self):
-            if self.default_factory is None:
-                args = tuple()
-            else:
-                args = self.default_factory,
-            return type(self), args, None, None, iter(self)
-        def copy(self):
-            return self.__copy__()
-        def __copy__(self):
-            return type(self)(self.default_factory, self)
-        def __deepcopy__(self, memo):
-            import copy
-            return type(self)(self.default_factory,
-                              copy.deepcopy(self.items()))
-        def __repr__(self):
-            return 'defaultdict(%s, %s)' % (self.default_factory,
-                                            dict.__repr__(self))
 
 def to_list(x):
     if x is None:
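
(Closing usage note, a sketch rather than part of the commit: unlike
collections.defaultdict, whose factory takes no arguments, PopulateDict passes
the missing key to its creator, which is what lets ResultProxy hand its
lookup_key function straight to util.PopulateDict.)

    from sqlalchemy import util

    cache = util.PopulateDict(lambda key: 'value for %r' % (key,))
    assert cache['x'] == "value for 'x'"   # creator receives the missing key
    assert 'x' in cache                    # result is stored for quick re-access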