From 437f1ce670e84964dc701488ac09af565ba807f7 Mon Sep 17 00:00:00 2001
From: Mike Bayer
Date: Sun, 14 Jan 2007 20:21:36 +0000
Subject: [PATCH] - postgres cursor option is now server_side_cursors=False;
 some users get bad results using them so they're off by default
 - type system slightly modified to support TypeDecorators that can be
 overridden by the dialect
 - added an NVarchar type to mssql (produces NVARCHAR), also MSUnicode which
 provides Unicode-translation for the NVarchar regardless of dialect
 convert_unicode setting.
---
 CHANGES                              | 19 ++++++++++++++-----
 README.unittests                     |  5 +++++
 lib/sqlalchemy/databases/mssql.py    | 12 +++++++-----
 lib/sqlalchemy/databases/postgres.py | 11 +++++------
 lib/sqlalchemy/types.py              |  9 ++++++++-
 test/sql/testtypes.py                | 16 +++++++++++++++-
 6 files changed, 54 insertions(+), 18 deletions(-)

diff --git a/CHANGES b/CHANGES
index 7ab2c07145..9021cb2817 100644
--- a/CHANGES
+++ b/CHANGES
@@ -16,14 +16,23 @@
 - the "op()" function is now treated as an "operation", rather than a "comparison".
   the difference is, an operation produces a BinaryExpression from which further operations
   can occur whereas comparison produces the more restrictive BooleanExpression
-- postgres
-  - postgres no longer uses client-side cursors, uses more efficient server side
-    cursors via apparently undocumented psycopg2 behavior recently discovered on the
-    mailing list. disable it via create_engine('postgres://', client_side_cursors=True)
+  - type system slightly modified to support TypeDecorators that can be overridden by the dialect
+    (ok, that's not very clear; it allows the mssql tweak below to be possible)
+- mssql:
+  - added an NVarchar type (produces NVARCHAR), also MSUnicode which provides Unicode-translation
+    for the NVarchar regardless of dialect convert_unicode setting.
+- postgres:
+  - postgres has an optional "server_side_cursors=True" flag which will utilize
+    server side cursors. these are appropriate for fetching only partial results
+    and are necessary for working with very large unbounded result sets.
+    While we'd like this to be the default behavior, different environments seem
+    to have different results and the causes have not been isolated so we are leaving
+    the feature off by default for now. Uses an apparently undocumented psycopg2
+    behavior recently discovered on the psycopg mailing list.
 - added "BIGSERIAL" support for postgres table with PGBigInteger/autoincrement
 - fixes to postgres reflection to better handle when schema names are present;
   thanks to jason (at) ncsmags.com [ticket:402]
-- mysql
+- mysql:
 - mysql is inconsistent with what kinds of quotes it uses in foreign keys during a
   SHOW CREATE TABLE, reflection updated to accomodate for all three styles [ticket:420]
 - firebird:
diff --git a/README.unittests b/README.unittests
index 1039562364..617d5fd6f0 100644
--- a/README.unittests
+++ b/README.unittests
@@ -81,3 +81,8 @@ utility with the "-a" (annotate) option, such as:
 
 which will create a new annotated file ./lib/sqlalchemy/sql.py,cover .  Pretty cool !
 
+TIPS
+----
+When running the tests on postgres, postgres gets slower and slower each time you run the tests.
+This seems to be related to the constant creation/dropping of tables.  Running a "VACUUM FULL"
+on the database will speed it up again.
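
A minimal usage sketch of the renamed flag described in the CHANGES entry above, assuming a
throwaway connection URL and table name (neither is part of this patch). Keyword arguments
given to create_engine() are passed through to the dialect, which is how server_side_cursors
reaches PGDialect.__init__ in the postgres.py hunk further down:

    from sqlalchemy import create_engine

    # server-side cursors are now opt-in; omit the flag to get plain
    # client-side psycopg2 cursors, which is the new default
    engine = create_engine('postgres://scott:tiger@localhost/test',
                           server_side_cursors=True)

    # useful when only part of a very large result set will be fetched
    result = engine.execute("select * from some_large_table")
    for row in result:
        print row
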
diff --git a/lib/sqlalchemy/databases/mssql.py b/lib/sqlalchemy/databases/mssql.py
index 2028ab48f9..6d351d0fb4 100644
--- a/lib/sqlalchemy/databases/mssql.py
+++ b/lib/sqlalchemy/databases/mssql.py
@@ -146,9 +146,13 @@ class MSText(sqltypes.TEXT):
 class MSString(sqltypes.String):
     def get_col_spec(self):
         return "VARCHAR(%(length)s)" % {'length' : self.length}
-class MSUnicode(sqltypes.Unicode):
+class MSNVarchar(MSString):
+    """NVARCHAR string, does unicode conversion if dialect.convert_unicode is true"""
     def get_col_spec(self):
         return "NVARCHAR(%(length)s)" % {'length' : self.length}
+class MSUnicode(sqltypes.Unicode):
+    """Unicode subclass, does unicode conversion in all cases, uses NVARCHAR impl"""
+    impl = MSNVarchar
 class MSChar(sqltypes.CHAR):
     def get_col_spec(self):
         return "CHAR(%(length)s)" % {'length' : self.length}
@@ -259,8 +263,6 @@ class MSSQLExecutionContext(default.DefaultExecutionContext):
 
             self.HASIDENT = False
 
-
-
 class MSSQLDialect(ansisql.ANSIDialect):
     def __init__(self, module=None, auto_identity_insert=False, **params):
         self.module = module or dbmodule
@@ -546,7 +548,7 @@ class MSSQLCompiler(ansisql.ANSICompiler):
 class MSSQLSchemaGenerator(ansisql.ANSISchemaGenerator):
     def get_column_specification(self, column, **kwargs):
         colspec = self.preparer.format_column(column) + " " + column.type.engine_impl(self.engine).get_col_spec()
-
+
         # install a IDENTITY Sequence if we have an implicit IDENTITY column
         if column.primary_key and column.autoincrement and isinstance(column.type, sqltypes.Integer) and not column.foreign_key:
             if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
@@ -583,7 +585,7 @@ class MSSQLIdentifierPreparer(ansisql.ANSIIdentifierPreparer):
         #TODO: determin MSSQL's case folding rules
         return value
 
-if dbmodule.__name__ == 'adodbapi':
+if dbmodule and dbmodule.__name__ == 'adodbapi':
     dialect = MSSQLDialect
 else:
     dialect = PyMSSQLDialect
diff --git a/lib/sqlalchemy/databases/postgres.py b/lib/sqlalchemy/databases/postgres.py
index 79db8716c3..94519a5acc 100644
--- a/lib/sqlalchemy/databases/postgres.py
+++ b/lib/sqlalchemy/databases/postgres.py
@@ -207,9 +207,9 @@ class PGExecutionContext(default.DefaultExecutionContext):
         self._last_inserted_ids = [v for v in row]
 
 class PGDialect(ansisql.ANSIDialect):
-    def __init__(self, module=None, use_oids=False, use_information_schema=False, client_side_cursors=False, **params):
+    def __init__(self, module=None, use_oids=False, use_information_schema=False, server_side_cursors=False, **params):
         self.use_oids = use_oids
-        self.client_side_cursors = client_side_cursors
+        self.server_side_cursors = server_side_cursors
         if module is None:
             #if psycopg is None:
             #    raise exceptions.ArgumentError("Couldnt locate psycopg1 or psycopg2: specify postgres module argument")
@@ -241,13 +241,12 @@ class PGDialect(ansisql.ANSIDialect):
         return ([], opts)
 
     def create_cursor(self, connection):
-        if self.client_side_cursors:
-            return connection.cursor()
-        else:
+        if self.server_side_cursors:
             # use server-side cursors:
             # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
             return connection.cursor('x')
-
+        else:
+            return connection.cursor()
 
     def create_execution_context(self):
         return PGExecutionContext(self)
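
The create_cursor() change above hinges on psycopg2's named ("server-side") cursors. As a rough
illustration of the underlying DBAPI behavior, independent of SQLAlchemy (the DSN and query are
placeholders): a named cursor keeps the result set on the server and hands rows over in batches,
which is why it suits large, unbounded results yet can behave differently from one environment
to another:

    import psycopg2

    conn = psycopg2.connect("dbname=test user=scott")

    # 'x' mirrors the hard-coded cursor name used in the diff above;
    # an unnamed conn.cursor() would pull the whole result to the client
    cur = conn.cursor('x')
    cur.execute("select * from some_large_table")
    while True:
        rows = cur.fetchmany(100)
        if not rows:
            break
        for row in rows:
            print row
    cur.close()
    conn.close()
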
diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py
index 3362312427..7260442700 100644
--- a/lib/sqlalchemy/types.py
+++ b/lib/sqlalchemy/types.py
@@ -82,6 +82,12 @@ class TypeDecorator(AbstractType):
         try:
             return self.impl_dict[dialect]
         except:
+            # see if the dialect has an adaptation of the TypeDecorator itself
+            adapted_decorator = dialect.type_descriptor(self)
+            if adapted_decorator is not self:
+                result = adapted_decorator.dialect_impl(dialect)
+                self.impl_dict[dialect] = result
+                return result
             typedesc = dialect.type_descriptor(self.impl)
             tt = self.copy()
             if not isinstance(tt, self.__class__):
@@ -138,7 +144,8 @@ def adapt_type(typeobj, colspecs):
         except KeyError:
             pass
     else:
-        # couldnt adapt...raise exception ?
+        # couldn't adapt - so just return the type itself
+        # (it may be a user-defined type)
         return typeobj
     # if we adapted the given generic type to a database-specific type,
     # but it turns out the originally given "generic" type
diff --git a/test/sql/testtypes.py b/test/sql/testtypes.py
index 2700ec6c79..8889b7b34c 100644
--- a/test/sql/testtypes.py
+++ b/test/sql/testtypes.py
@@ -60,7 +60,20 @@ class AdaptTest(PersistTest):
         assert (t1.impl.length == 20)
         assert isinstance(t2.impl, TEXT)
         assert t2.impl.length is None
-
+
+
+    def testdialecttypedecorators(self):
+        """test that a Dialect can provide a dialect-specific subclass of a TypeDecorator subclass."""
+        import sqlalchemy.databases.mssql as mssql
+        dialect = mssql.MSSQLDialect()
+        # run the test twice to ensure the caching step works too
+        for x in range(2):
+            col = Column('', Unicode(length=10))
+            dialect_type = col.type.dialect_impl(dialect)
+            assert isinstance(dialect_type, mssql.MSUnicode)
+            assert dialect_type.get_col_spec() == 'NVARCHAR(10)'
+            assert isinstance(dialect_type.impl, mssql.MSString)
+
 class OverrideTest(PersistTest):
     """tests user-defined types, including a full type as well as a TypeDecorator"""
 
@@ -166,6 +179,7 @@ class UnicodeTest(AssertMixin):
             self.assert_(isinstance(x['plain_data'], unicode) and x['plain_data'] == unicodedata)
         finally:
             db.engine.dialect.convert_unicode = prev_unicode
+
 
 class BinaryTest(AssertMixin):
-- 
2.47.2
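
Taken together, the types.py and mssql.py changes let a dialect substitute its own subclass of a
TypeDecorator: a generic Unicode resolves to MSUnicode (and hence NVARCHAR) when the MSSQL dialect
is asked for an implementation. A small sketch of the behavior the new test asserts, run against
this revision (the column name is arbitrary):

    from sqlalchemy import Column, Unicode
    import sqlalchemy.databases.mssql as mssql

    dialect = mssql.MSSQLDialect()
    col = Column('description', Unicode(length=10))

    # dialect_impl() first asks the dialect for an adaptation of the
    # TypeDecorator itself, so the generic Unicode becomes MSUnicode here
    dialect_type = col.type.dialect_impl(dialect)

    assert isinstance(dialect_type, mssql.MSUnicode)
    assert dialect_type.get_col_spec() == 'NVARCHAR(10)'
    assert isinstance(dialect_type.impl, mssql.MSString)
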