From: Mike Bayer
Date: Tue, 6 Dec 2011 21:31:52 +0000 (-0500)
Subject: - [bug] Decode incoming values when retrieving
X-Git-Tag: rel_0_7_4~11
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8ad65d0ee114970338c8b668c280f3e95534e744;p=thirdparty%2Fsqlalchemy%2Fsqlalchemy.git

- [bug] Decode incoming values when retrieving
  list of index names and the names of columns
  within those indexes. [ticket:2269]

- rewrite unicode reflection test to be of more
  general use on broken backends
---

diff --git a/CHANGES b/CHANGES
index d6ffdbbd1f..1f67610667 100644
--- a/CHANGES
+++ b/CHANGES
@@ -258,6 +258,10 @@ CHANGES
     work fully with FreeTDS 0.91 here, however.
     [ticket:2343]
 
+  - [bug] Decode incoming values when retrieving
+    list of index names and the names of columns
+    within those indexes. [ticket:2269]
+
 - mysql
   - [bug] Unicode adjustments allow latest pymysql
     (post 0.4) to pass 100% on Python 2.
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 578c5073d7..bc50da5ad6 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -1234,7 +1234,10 @@ class MSDialect(default.DefaultDialect):
                               sqltypes.String(convert_unicode=True)),
                 sql.bindparam('schname', current_schema,
                               sqltypes.String(convert_unicode=True))
-            ]
+            ],
+            typemap = {
+                'name':sqltypes.Unicode()
+            }
             )
         )
         indexes = {}
@@ -1260,7 +1263,11 @@ class MSDialect(default.DefaultDialect):
                               sqltypes.String(convert_unicode=True)),
                 sql.bindparam('schname', current_schema,
                               sqltypes.String(convert_unicode=True))
-                ]),
+                ],
+                typemap = {
+                    'name':sqltypes.Unicode()
+                }
+            ),
         )
         for row in rp:
             if row['index_id'] in indexes:
diff --git a/test/engine/test_reflection.py b/test/engine/test_reflection.py
index 7cccb930b6..45d72bfabe 100644
--- a/test/engine/test_reflection.py
+++ b/test/engine/test_reflection.py
@@ -900,54 +900,115 @@ class SchemaManipulationTest(fixtures.TestBase):
         assert addresses.constraints == set([addresses.primary_key, fk])
 
 class UnicodeReflectionTest(fixtures.TestBase):
+    @classmethod
+    def setup_class(cls):
+        # trigger mysql _server_casing check...
+        testing.db.connect().close()
+
+        cls.bind = bind = engines.utf8_engine(
+                            options={'convert_unicode' : True})
+
+        cls.metadata = metadata = MetaData()
+
+        no_multibyte_period = set([
+            (u'plain', u'col_plain', u'ix_plain')
+        ])
+        no_has_table = [
+            (u'no_has_table_1', u'col_Unit\u00e9ble', u'ix_Unit\u00e9ble'),
+            (u'no_has_table_2', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'),
+        ]
+        no_case_sensitivity = [
+            (u'\u6e2c\u8a66', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'),
+            (u'unit\u00e9ble', u'col_unit\u00e9ble', u'ix_unit\u00e9ble'),
+        ]
+        full = [
+            (u'Unit\u00e9ble', u'col_Unit\u00e9ble', u'ix_Unit\u00e9ble'),
+            (u'\u6e2c\u8a66', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'),
+        ]
+
+        # as you can see, our options for this kind of thing
+        # are really limited unless you're on PG or SQLite
+
+        # forget about it on these backends
+        if testing.against('sybase', 'maxdb', 'oracle'):
+            names = no_multibyte_period
+        # mysql can't handle casing usually
+        elif testing.against("mysql") and \
+            not testing.requires._has_mysql_fully_case_sensitive():
+            names = no_multibyte_period.union(no_case_sensitivity)
+        # mssql + pyodbc + freetds can't compare multibyte names to
+        # information_schema.tables.table_name
+        elif testing.against("mssql"):
+            names = no_multibyte_period.union(no_has_table)
+        else:
+            names = no_multibyte_period.union(full)
+
+        for tname, cname, ixname in names:
+            t = Table(tname, metadata, Column('id', sa.Integer,
+                      sa.Sequence(cname + '_id_seq'), primary_key=True),
+                Column(cname, Integer)
+            )
+            schema.Index(ixname, t.c[cname])
+
+        metadata.create_all(bind)
+        cls.names = names
+
+    @classmethod
+    def teardown_class(cls):
+        cls.metadata.drop_all(cls.bind, checkfirst=False)
+        cls.bind.dispose()
+
+    @testing.requires.unicode_connections
+    def test_has_table(self):
+        for tname, cname, ixname in self.names:
+            assert self.bind.has_table(tname), "Can't detect name %s" % tname
 
     @testing.requires.unicode_connections
     def test_basic(self):
-        try:
+        # the 'convert_unicode' should not get in the way of the
+        # reflection process.  reflecttable for oracle, postgresql
+        # (others?) expect non-unicode strings in result sets/bind
+        # params
 
-            # the 'convert_unicode' should not get in the way of the
-            # reflection process.  reflecttable for oracle, postgresql
-            # (others?) expect non-unicode strings in result sets/bind
-            # params
+        bind = self.bind
+        names = set([rec[0] for rec in self.names])
 
-            bind = engines.utf8_engine(options={'convert_unicode'
-                    : True})
-            metadata = MetaData(bind)
-            if testing.against('sybase', 'maxdb', 'oracle', 'mssql'):
-                names = set([u'plain'])
-            else:
-                names = set([u'plain', u'Unit\u00e9ble', u'\u6e2c\u8a66'
-                            ])
-            for name in names:
-                Table(name, metadata, Column('id', sa.Integer,
-                      sa.Sequence(name + '_id_seq'), primary_key=True))
-            metadata.create_all()
-            reflected = set(bind.table_names())
+        reflected = set(bind.table_names())
 
-            # Jython 2.5 on Java 5 lacks unicodedata.normalize
+        # Jython 2.5 on Java 5 lacks unicodedata.normalize
 
-            if not names.issubset(reflected) and hasattr(unicodedata,
-                    'normalize'):
+        if not names.issubset(reflected) and hasattr(unicodedata,'normalize'):
 
-                # Python source files in the utf-8 coding seem to
-                # normalize literals as NFC (and the above are
-                # explicitly NFC).  Maybe this database normalizes NFD
-                # on reflection.
+            # Python source files in the utf-8 coding seem to
+            # normalize literals as NFC (and the above are
+            # explicitly NFC).  Maybe this database normalizes NFD
+            # on reflection.
 
-                nfc = set([unicodedata.normalize('NFC', n) for n in
-                          names])
-                self.assert_(nfc == names)
+            nfc = set([unicodedata.normalize('NFC', n) for n in names])
+            self.assert_(nfc == names)
 
-                # Yep.  But still ensure that bulk reflection and
-                # create/drop work with either normalization.
+            # Yep.  But still ensure that bulk reflection and
+            # create/drop work with either normalization.
 
-                r = MetaData(bind, reflect=True)
-                r.drop_all()
-                r.create_all()
-        finally:
-            metadata.drop_all()
-            bind.dispose()
+            r = MetaData(bind, reflect=True)
+            r.drop_all(checkfirst=False)
+            r.create_all(checkfirst=False)
 
+    @testing.requires.unicode_connections
+    def test_get_names(self):
+        inspector = Inspector.from_engine(self.bind)
+        names = dict(
+            (tname, (cname, ixname)) for tname, cname, ixname in self.names
+        )
+        for tname in inspector.get_table_names():
+            assert tname in names
+            eq_(
+                [
                    (rec['name'], rec['column_names'][0])
+                    for rec in inspector.get_indexes(tname)
+                ],
+                [(names[tname][1], names[tname][0])]
+            )
 
 class SchemaTest(fixtures.TestBase):
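Note on the pattern used in the fix above: the typemap argument to sql.text() attaches a result-set type to a named column, so the raw value returned by the DBAPI is run through that type's result processor; with sqltypes.Unicode(), the index and column names come back as Python unicode strings instead of raw bytes. The following is a minimal, illustrative sketch of that pattern against the 0.7-era API; the engine URL and table name are hypothetical placeholders and are not part of this commit.

    # Illustrative sketch only (SQLAlchemy 0.7-era API, Python 2).
    # The DSN and table name below are hypothetical.
    from sqlalchemy import create_engine, sql, types as sqltypes

    engine = create_engine("mssql+pyodbc://scott:tiger@mydsn")  # placeholder DSN

    stmt = sql.text(
        "select ind.index_id, ind.name "
        "from sys.indexes as ind join sys.tables as tab "
        "on ind.object_id = tab.object_id "
        "where tab.name = :tabname",
        bindparams=[
            # encode the bound value on the way in
            sql.bindparam('tabname', u'some_table',
                          sqltypes.String(convert_unicode=True))
        ],
        typemap={
            # decode the 'name' result column on the way out
            'name': sqltypes.Unicode()
        }
    )

    for row in engine.execute(stmt):
        assert isinstance(row['name'], unicode)  # index name arrives decoded

Without the typemap, drivers such as pyodbc under FreeTDS can hand back undecoded byte strings for these catalog queries, which is the behavior ticket:2269 addresses.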