git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
foo
author Mike Bayer <mike_mp@zzzcomputing.com>
Sun, 8 Mar 2015 15:29:10 +0000 (11:29 -0400)
committer Mike Bayer <mike_mp@zzzcomputing.com>
Sun, 8 Mar 2015 15:34:14 +0000 (11:34 -0400)
lib/sqlalchemy/engine/default.py
lib/sqlalchemy/engine/result.py
lib/sqlalchemy/sql/compiler.py
test/sql/test_compiler.py
test/sql/test_join_rewriting.py
test/sql/test_labels.py
test/sql/test_selectable.py
test/sql/test_text.py
test/sql/test_type_expressions.py

index 62469d7201cc3694377d986d65ac4f9c98e41dd1..bdd9d2715c8b8b070490172737855f49c6723f16 100644 (file)
@@ -463,7 +463,7 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
     executemany = False
     compiled = None
     statement = None
-    _result_columns = None
+    result_column_struct = None
     _is_implicit_returning = False
     _is_explicit_returning = False
 
@@ -522,10 +522,8 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
         self.execution_options = compiled.statement._execution_options.union(
             connection._execution_options)
 
-        # compiled clauseelement.  process bind params, process table defaults,
-        # track collections used by ResultProxy to target and process results
-
-        self._result_columns = compiled._result_columns
+        self.result_column_struct = (
+            compiled._result_columns, compiled._ordered_columns)
 
         self.unicode_statement = util.text_type(compiled)
         if not dialect.supports_unicode_statements:
@@ -663,13 +661,6 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
         self.cursor = self.create_cursor()
         return self
 
-    @util.memoized_property
-    def result_map(self):
-        if self._result_columns:
-            return self.compiled.result_map
-        else:
-            return None
-
     @util.memoized_property
     def engine(self):
         return self.root_connection.engine
index 6eca54a34dd42f2a43a997289b21e8451be92b4a..893cd2a305ac76404c3fc329a5215406b82c5253 100644 (file)
@@ -193,13 +193,14 @@ class ResultMetaData(object):
         translate_colname = context._translate_colname
         self.case_sensitive = case_sensitive = dialect.case_sensitive
 
-        if context._result_columns:
-            num_ctx_cols = len(context._result_columns)
+        if context.result_column_struct:
+            result_columns, cols_are_ordered = context.result_column_struct
+            num_ctx_cols = len(result_columns)
         else:
             num_ctx_cols = None
 
         if num_ctx_cols and \
-                context.compiled._ordered_columns and \
+                cols_are_ordered and \
                 num_ctx_cols == len(metadata):
             # case 1 - SQL expression statement, number of columns
             # in result matches number of cols in compiled.  This is the
@@ -217,10 +218,10 @@ class ResultMetaData(object):
                     obj,
                     None
                 ) for idx, (key, name, obj, type_)
-                in enumerate(context._result_columns)
+                in enumerate(result_columns)
             ]
             self.keys = [
-                elem[1] for elem in context._result_columns
+                elem[1] for elem in result_columns
             ]
         else:
             # case 2 - raw string, or number of columns in result does
@@ -234,6 +235,9 @@ class ResultMetaData(object):
             # In all these cases we fall back to the "named" approach
             # that SQLAlchemy has used up through 0.9.
 
+            if num_ctx_cols:
+                result_map = self._create_result_map(result_columns)
+
             raw = []
             self.keys = []
             untranslated = None
@@ -256,7 +260,7 @@ class ResultMetaData(object):
 
                 if num_ctx_cols:
                     try:
-                        ctx_rec = context.result_map[colname]
+                        ctx_rec = result_map[colname]
                     except KeyError:
                         mapped_type = typemap.get(coltype, sqltypes.NULLTYPE)
                         obj = None
@@ -296,8 +300,8 @@ class ResultMetaData(object):
             # ambiguous column exception when accessed.
             if len(by_key) != num_ctx_cols:
                 seen = set()
-                for idx in range(num_ctx_cols):
-                    key = raw[idx][1]
+                for rec in raw:
+                    key = rec[1]
                     if key in seen:
                         by_key[key] = (None, by_key[key][1], None)
                     seen.add(key)
@@ -324,6 +328,22 @@ class ResultMetaData(object):
                     for elem in raw if elem[5]
                 ])
 
+    @classmethod
+    def _create_result_map(cls, result_columns):
+        d = {}
+        for elem in result_columns:
+            key, rec = elem[0], elem[1:]
+            if key in d:
+                # conflicting keyname, just double up the list
+                # of objects.  this will cause an "ambiguous name"
+                # error if an attempt is made by the result set to
+                # access.
+                e_name, e_obj, e_type = d[key]
+                d[key] = e_name, e_obj + rec[1], e_type
+            else:
+                d[key] = rec
+        return d
+
     @util.pending_deprecation("0.8", "sqlite dialect uses "
                               "_translate_colname() now")
     def _set_keymap_synonym(self, name, origname):
index e37fa646c29168d5d14653ba6f251c5c61471339..a3a247ac03a0b737e95268cfb14a2177e8b11fec 100644 (file)
@@ -393,22 +393,6 @@ class SQLCompiler(Compiled):
         if self.positional and dialect.paramstyle == 'numeric':
             self._apply_numbered_params()
 
-    @property
-    def result_map(self):
-        d = {}
-        for elem in self._result_columns:
-            key, rec = elem[0], elem[1:]
-            if key in d:
-                # conflicting keyname, just double up the list
-                # of objects.  this will cause an "ambiguous name"
-                # error if an attempt is made by the result set to
-                # access.
-                e_name, e_obj, e_type = d[key]
-                d[key] = e_name, e_obj + rec[1], e_type
-            else:
-                d[key] = rec
-        return d
-
     @util.memoized_instancemethod
     def _init_cte_state(self):
         """Initialize collections related to CTEs only if
@@ -501,6 +485,11 @@ class SQLCompiler(Compiled):
         compiled object, for those values that are present."""
         return self.construct_params(_check=False)
 
+    @util.dependencies("sqlalchemy.engine.result")
+    def _create_result_map(self, result):
+        """utility method used for unit tests only."""
+        return result.ResultMetaData._create_result_map(self._result_columns)
+
     def default_from(self):
         """Called when a SELECT statement has no froms, and no FROM clause is
         to be appended.
index 428fc898602bf66662efc7b6d01ed910328dc14d..73c1402f6fe3ca7a1e747b95e1198d02c894ddc0 100644 (file)
@@ -3380,7 +3380,7 @@ class ResultMapTest(fixtures.TestBase):
         stmt = select([t]).union(select([t]))
         comp = stmt.compile()
         eq_(
-            comp.result_map,
+            comp._create_result_map(),
             {'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
              'b': ('b', (t.c.b, 'b', 'b'), t.c.b.type)}
         )
@@ -3391,7 +3391,7 @@ class ResultMapTest(fixtures.TestBase):
         stmt = select([t.c.a]).select_from(t.join(subq, t.c.a == subq.c.a))
         comp = stmt.compile()
         eq_(
-            comp.result_map,
+            comp._create_result_map(),
             {'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)}
         )
 
@@ -3400,7 +3400,7 @@ class ResultMapTest(fixtures.TestBase):
         stmt = select([t.c.a]).union(select([t.c.b]))
         comp = stmt.compile()
         eq_(
-            comp.result_map,
+            comp._create_result_map(),
             {'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)},
         )
 
@@ -3410,9 +3410,9 @@ class ResultMapTest(fixtures.TestBase):
         tc = type_coerce(t.c.a, String)
         stmt = select([t.c.a, l1, tc])
         comp = stmt.compile()
-        tc_anon_label = comp.result_map['a_1'][1][0]
+        tc_anon_label = comp._create_result_map()['a_1'][1][0]
         eq_(
-            comp.result_map,
+            comp._create_result_map(),
             {
                 'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
                 'bar': ('bar', (l1, 'bar'), l1.type),
@@ -3431,11 +3431,11 @@ class ResultMapTest(fixtures.TestBase):
             t1.join(union, t1.c.a == union.c.t1_a)).apply_labels()
         comp = stmt.compile()
         eq_(
-            set(comp.result_map),
+            set(comp._create_result_map()),
             set(['t1_1_b', 't1_1_a', 't1_a', 't1_b'])
         )
         is_(
-            comp.result_map['t1_a'][1][2], t1.c.a
+            comp._create_result_map()['t1_a'][1][2], t1.c.a
         )
 
     def test_insert_with_select_values(self):
@@ -3448,7 +3448,7 @@ class ResultMapTest(fixtures.TestBase):
         stmt = t2.insert().values(a=select([astring])).returning(aint)
         comp = stmt.compile(dialect=postgresql.dialect())
         eq_(
-            comp.result_map,
+            comp._create_result_map(),
             {'a': ('a', (aint, 'a', 'a'), aint.type)}
         )
 
@@ -3463,6 +3463,6 @@ class ResultMapTest(fixtures.TestBase):
             returning(aint)
         comp = stmt.compile(dialect=postgresql.dialect())
         eq_(
-            comp.result_map,
+            comp._create_result_map(),
             {'a': ('a', (aint, 'a', 'a'), aint.type)}
         )
index f99dfda4ef2f67f43c70f37a429f0c6d088e1396..922b7f322d16336b8bcf0881629ee711c0d7a272 100644 (file)
@@ -80,7 +80,7 @@ class _JoinRewriteTestBase(AssertsCompiledSQL):
         # .key in SQL
         for key, col in zip([c.name for c in s.c], s.inner_columns):
             key = key % compiled.anon_map
-            assert col in compiled.result_map[key][1]
+            assert col in compiled._create_result_map()[key][1]
 
     _a_bkeyselect_bkey = ""
 
index 4aa92308091d8f29882d06a3393e8f0d518a3502..1792a42d8fe78086a0ad7244ab1bae8e71f137d2 100644 (file)
@@ -90,7 +90,7 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
         table1 = self.table1
         compiled = s.compile(dialect=self._length_fixture())
 
-        assert set(compiled.result_map['some_large_named_table__2'][1]).\
+        assert set(compiled._create_result_map()['some_large_named_table__2'][1]).\
             issuperset(
             [
                 'some_large_named_table_this_is_the_data_column',
@@ -99,7 +99,7 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
             ]
         )
 
-        assert set(compiled.result_map['some_large_named_table__1'][1]).\
+        assert set(compiled._create_result_map()['some_large_named_table__1'][1]).\
             issuperset(
             [
                 'some_large_named_table_this_is_the_primarykey_column',
@@ -134,11 +134,11 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
         s2 = select([s])
         compiled = s2.compile(dialect=self._length_fixture())
         assert \
-            set(compiled.result_map['this_is_the_data_column'][1]).\
+            set(compiled._create_result_map()['this_is_the_data_column'][1]).\
             issuperset(['this_is_the_data_column',
                         s.c.this_is_the_data_column])
         assert \
-            set(compiled.result_map['this_is_the_primarykey_column'][1]).\
+            set(compiled._create_result_map()['this_is_the_primarykey_column'][1]).\
             issuperset(['this_is_the_primarykey_column',
                         s.c.this_is_the_primarykey_column])
 
@@ -170,14 +170,14 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
             ') '
             'AS anon_1', dialect=dialect)
         compiled = s.compile(dialect=dialect)
-        assert set(compiled.result_map['anon_1_this_is_the_data_2'][1]).\
+        assert set(compiled._create_result_map()['anon_1_this_is_the_data_2'][1]).\
             issuperset([
                 'anon_1_this_is_the_data_2',
                 q.corresponding_column(
                     table1.c.this_is_the_data_column)
             ])
 
-        assert set(compiled.result_map['anon_1_this_is_the_prim_1'][1]).\
+        assert set(compiled._create_result_map()['anon_1_this_is_the_prim_1'][1]).\
             issuperset([
                 'anon_1_this_is_the_prim_1',
                 q.corresponding_column(
@@ -437,13 +437,13 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
         dialect = default.DefaultDialect(label_length=10)
         compiled = q.compile(dialect=dialect)
 
-        assert set(compiled.result_map['some_2'][1]).issuperset([
+        assert set(compiled._create_result_map()['some_2'][1]).issuperset([
             table1.c.this_is_the_data_column,
             'some_large_named_table_this_is_the_data_column',
             'some_2'
         ])
 
-        assert set(compiled.result_map['some_1'][1]).issuperset([
+        assert set(compiled._create_result_map()['some_1'][1]).issuperset([
             table1.c.this_is_the_primarykey_column,
             'some_large_named_table_this_is_the_primarykey_column',
             'some_1'
@@ -459,12 +459,12 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
         dialect = default.DefaultDialect(label_length=10)
         compiled = x.compile(dialect=dialect)
 
-        assert set(compiled.result_map['this_2'][1]).issuperset([
+        assert set(compiled._create_result_map()['this_2'][1]).issuperset([
             q.corresponding_column(table1.c.this_is_the_data_column),
             'this_is_the_data_column',
             'this_2'])
 
-        assert set(compiled.result_map['this_1'][1]).issuperset([
+        assert set(compiled._create_result_map()['this_1'][1]).issuperset([
             q.corresponding_column(table1.c.this_is_the_primarykey_column),
             'this_is_the_primarykey_column',
             'this_1'])
@@ -531,7 +531,7 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
                             'SELECT asdf.abcde FROM a AS asdf',
                             dialect=dialect)
         compiled = s.compile(dialect=dialect)
-        assert set(compiled.result_map['abcde'][1]).issuperset([
+        assert set(compiled._create_result_map()['abcde'][1]).issuperset([
             'abcde', a1.c.abcde, 'abcde'])
 
         # column still there, but short label
@@ -540,5 +540,5 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
                             'SELECT asdf.abcde AS _1 FROM a AS asdf',
                             dialect=dialect)
         compiled = s.compile(dialect=dialect)
-        assert set(compiled.result_map['_1'][1]).issuperset([
+        assert set(compiled._create_result_map()['_1'][1]).issuperset([
             'asdf_abcde', a1.c.abcde, '_1'])
index 99d0cbe766d327d65d72f1aa75292ecc67eda35b..3931f99e48432d5ea6958976adf4ba0961868559 100644 (file)
@@ -1969,11 +1969,11 @@ class WithLabelsTest(fixtures.TestBase):
 
     def _assert_result_keys(self, s, keys):
         compiled = s.compile()
-        eq_(set(compiled.result_map), set(keys))
+        eq_(set(compiled._create_result_map()), set(keys))
 
     def _assert_subq_result_keys(self, s, keys):
         compiled = s.select().compile()
-        eq_(set(compiled.result_map), set(keys))
+        eq_(set(compiled._create_result_map()), set(keys))
 
     def _names_overlap(self):
         m = MetaData()
@@ -2124,7 +2124,7 @@ class SelectProxyTest(fixtures.TestBase):
         compiled = stmt.compile()
         return dict(
             (elem, key)
-            for key, elements in compiled.result_map.items()
+            for key, elements in compiled._create_result_map().items()
             for elem in elements[1]
         )
 
index 4302dde48e645f4e308765a5b759217d1d1536e7..c2f4d2c151a0055d3d956717e687e3fb68573f17 100644 (file)
@@ -315,7 +315,7 @@ class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
         )
 
         compiled = t.compile()
-        eq_(compiled.result_map,
+        eq_(compiled._create_result_map(),
             {'id': ('id',
                     (t.c.id._proxies[0],
                      'id',
@@ -331,7 +331,7 @@ class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
         t = text("select id, name from user").columns(id=Integer, name=String)
 
         compiled = t.compile()
-        eq_(compiled.result_map,
+        eq_(compiled._create_result_map(),
             {'id': ('id',
                     (t.c.id._proxies[0],
                      'id',
@@ -350,7 +350,7 @@ class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
             table1.join(t, table1.c.myid == t.c.id))
         compiled = stmt.compile()
         eq_(
-            compiled.result_map,
+            compiled._create_result_map(),
             {
                 "myid": ("myid",
                          (table1.c.myid, "myid", "myid"), table1.c.myid.type),
@@ -382,7 +382,7 @@ class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
         compiled = stmt.compile()
         return dict(
             (elem, key)
-            for key, elements in compiled.result_map.items()
+            for key, elements in compiled._create_result_map().items()
             for elem in elements[1]
         )
 
index c82ad3b942c16b814bb3138fe33cdd3d8a17bb92..574edfe9e849dd407b00b2daa03368719e3794ae 100644 (file)
@@ -53,19 +53,19 @@ class SelectTest(_ExprFixture, fixtures.TestBase, AssertsCompiledSQL):
         table = self._fixture()
 
         compiled = select([table]).apply_labels().compile()
-        assert table.c.y in compiled.result_map['test_table_y'][1]
-        assert table.c.x in compiled.result_map['test_table_x'][1]
+        assert table.c.y in compiled._create_result_map()['test_table_y'][1]
+        assert table.c.x in compiled._create_result_map()['test_table_x'][1]
 
         # the lower() function goes into the result_map, we don't really
         # need this but it's fine
         self.assert_compile(
-            compiled.result_map['test_table_y'][1][2],
+            compiled._create_result_map()['test_table_y'][1][2],
             "lower(test_table.y)"
         )
         # then the original column gets put in there as well.
         # it's not important that it's the last value.
         self.assert_compile(
-            compiled.result_map['test_table_y'][1][-1],
+            compiled._create_result_map()['test_table_y'][1][-1],
             "test_table.y"
         )