From c5e1abc7f7adce841775ea92b72bcf95207027af Mon Sep 17 00:00:00 2001
From: Mike Bayer
Engines exist for SQLite, Postgres, MySQL, and Oracle, using the Pysqlite, Psycopg (1 or 2), MySQLDB, and cx_Oracle modules. Each engine imports its corresponding module which is required to be installed. For Postgres and Oracle, an alternate module may be specified at construction time as well.
An example of connecting to each engine is as follows: The string based argument names for connecting are translated to the appropriate names when the connection is made; argument names include "host" or "hostname" for database host, "database", "db", or "dbname" for the database name (also is dsn for Oracle), "user" or "username" for the user, and "password", "pw", or "passwd" for the password. SQLite expects "filename" or "file" for the filename, or if None it defaults to ":memory:". The connection arguments can be specified as a string + dictionary pair, or a single URL-encoded string, as follows: Note that the general form of connecting to an engine is: The second argument is a dictionary whose key/value pairs will be passed to the underlying DBAPI connect() method as keyword arguments. Any keyword argument supported by the DBAPI module can be in this dictionary. Engines can also be loaded by URL. The above format is converted into <% ' A few useful methods off the SQLEngine are described here: The remaining arguments to create_engine are keyword arguments that are passed to the specific subclass of sqlalchemy.engine.SQLEngine being used, as well as the underlying sqlalchemy.pool.Pool instance. All of the options described in the previous section <&formatting.myt:link, path="pooling_configuration"&> can be specified, as well as engine-specific options: pool=None : an instance of sqlalchemy.pool.Pool to be used as the underlying source for connections, overriding the engine's connect arguments (pooling is described in the previous section). If None, a default Pool (QueuePool or SingletonThreadPool as appropriate) will be created using the engine's connect arguments. Example: Creating and dropping is easy, just use the create() and drop() methods:
<&|formatting.myt:code&>
- <&formatting.myt:poplink&>
- employees = Table('employees', engine,
+ <&formatting.myt:poplink&>employees = Table('employees', engine,
Column('employee_id', Integer, primary_key=True),
Column('employee_name', String(60), nullable=False, key='name'),
Column('employee_dept', Integer, ForeignKey("departments.department_id"))
@@ -126,18 +122,162 @@ CREATE TABLE employees(
employee_name VARCHAR(60) NOT NULL,
employee_dept INTEGER REFERENCES departments(department_id)
)
-
{} &>
-
- <&formatting.myt:poplink&>
- employees.drop() <&|formatting.myt:codepopper, link="sql" &>
+
+ <&formatting.myt:poplink&>employees.drop() <&|formatting.myt:codepopper, link="sql" &>
DROP TABLE employees
{} &>
&>
&>
+
+ <&|doclib.myt:item, name="defaults", description="Column Defaults and OnUpdates" &>
+ SQLAlchemy includes flexible constructs in which to create default values for columns upon the insertion of rows, as well as upon update. These defaults can take several forms: a constant, a Python callable to be pre-executed before the SQL is executed, a SQL expression or function to be pre-executed before the SQL is executed, a pre-executed Sequence (for databases that support sequences), or a "passive" default, which is a default function triggered by the database itself upon insert, the value of which can then be post-fetched by the engine, provided the row provides a primary key in which to call upon. A basic default is most easily specified by the "default" keyword argument to Column: The "default" keyword can also take SQL expressions, including select statements or direct function calls: The "default" keyword argument is shorthand for using a ColumnDefault object in a column definition. This syntax is optional, but is required for other types of defaults, further described below: Similar to an on-insert default is an on-update default, which is most easily specified by the "onupdate" keyword to Column, which also can be a constant, plain Python function or SQL expression: To use a ColumnDefault explicitly for an on-update, use the "for_update" keyword argument: A PassiveDefault indicates a column default or on-update value that is executed automatically by the database. This construct is used to specify a SQL function that will be specified as "DEFAULT" when creating tables, and also to indicate the presence of new data that is available to be "post-fetched" after an insert or update execution. A create call for the above table will produce: PassiveDefaults also send a message to the SQLEngine that data is available after update or insert. The object-relational mapper system uses this information to post-fetch rows after insert or update, so that instances can be refreshed with the new data. 
Below is a simplified version: Tables that are reflected from the database which have default values set on them, will receive those defaults as PassiveDefaults. Current Postgres support does not rely upon OID's to determine the identity of a row. This is because the usage of OIDs has been deprecated with Postgres and they are disabled by default for table creates as of PG version 8. Psycopg2's "cursor.lastrowid" function only returns OIDs. Therefore, when inserting a new row which has passive defaults set on the primary key columns, the default function is still pre-executed since SQLAlchemy would otherwise have no way of retrieving the row just inserted. A table with a sequence looks like: The Sequence is used with Postgres or Oracle to indicate the name of a Sequence that will be used to create default values for a column. When a table with a Sequence on a column is created by SQLAlchemy, the Sequence object is also created. Similarly, the Sequence is dropped when the table is dropped. Sequences are typically used with primary key columns. When using Postgres, if an integer primary key column defines no explicit Sequence or other default method, SQLAlchemy will create the column with the SERIAL keyword, and will pre-execute a sequence named "tablename_columnname_seq" in order to retrieve new primary key values. Oracle, which has no "auto-increment" keyword, requires that a Sequence be created for a table if automatic primary key generation is desired. Note that for all databases, primary key values can always be explicitly stated within the bind parameters for any insert statement as well, removing the need for any kind of default generation function. A Sequence object can be defined on a Table that is then used for a non-sequence-supporting database. In that case, the Sequence object is simply ignored. 
Note that a Sequence object is entirely optional for all databases except Oracle, as other databases offer options for auto-creating primary key values, such as AUTOINCREMENT, SERIAL, etc. SQLAlchemy will use these default methods for creating primary key values if no Sequence is present on the table metadata. A sequence can also be specified with optional=True which indicates the Sequence should only be used on a database that requires an explicit sequence, and not those that supply some other method of providing integer values. At the moment, it essentially means "use this sequence only with Oracle and not Postgres". Indexes can be defined on table columns, including named indexes, non-unique or unique, multiple column. Indexes are included along with table create and drop statements. They are not used for any kind of run-time constraint checking...SQLAlchemy leaves that job to the expert on constraint checking, the database itself. Occasionally an application will need to reference the same tables within multiple databases simultaneously. Since a Table object is specific to a SQLEngine, an extra method is provided to create copies of the Table object for a different SQLEngine instance, which can represent a different set of connection parameters, or a totally different database driver:
+ A Table object created against a specific engine can be re-created against a new engine using the toengine method: You can also create tables using a "database neutral" engine, which can serve as a starting point for tables that are then adapted to specific engines: Also available is the "database neutral" ansisql engine: Flexible "multi-engined" tables can also be achieved via the proxy engine, described in the section <&formatting.myt:link, path="dbengine_proxy"&>. A table with a sequence looks like: The Sequence is used when a Postgres or Oracle database schema defines a sequence of a specific name which must be used to create integer values. If a Sequence is not defined, Postgres will default to regular SERIAL access. Oracle currently has no default primary key method; so explicit primary key values or Sequence objects are required to insert new rows. Defining a Sequence means that it will be created along with the table.create() call, and that the sequence will be explicitly used when inserting new rows for this table, for databases that support sequences. If the Table is connected to a database that doesn't support sequences, the Sequence object is simply ignored. Note that a Sequence object is entirely optional for all databases except Oracle, as other databases offer options for auto-creating primary key values, such as AUTOINCREMENT, SERIAL, etc. SQLAlchemy will use these default methods for creating primary key values if no Sequence is present on the table metadata. A sequence can also be specified with optional=True which indicates the Sequence should only be used on a database that requires an explicit sequence, and not those that supply some other method of providing integer values. At the moment, it essentially means "use this sequence only with Oracle and not Postgres". More docs TODO in this area include the ColumnDefault and PassiveDefault objects which provide more options to automatic generation of column values. 
TableClause and ColumnClause are "primitive" versions of the Table and Column objects which don't use engines at all; applications that just want to generate SQL strings but not directly communicate with a database can use TableClause and ColumnClause objects, which are non-singleton and serve as the "lexical" base class of Table and Column: TableClause and ColumnClause are strictly lexical. This means they are fully supported within the full range of SQL statement generation, but they don't support schema concepts like creates, drops, primary keys, defaults, nullable status, indexes, or foreign keys.
-
&>
<&|doclib.myt:item, name="proxy", description="Using the Proxy Engine" &>
diff --git a/doc/build/content/docstrings.myt b/doc/build/content/docstrings.myt
index 9ab1146d86..91513fcb97 100644
--- a/doc/build/content/docstrings.myt
+++ b/doc/build/content/docstrings.myt
@@ -14,7 +14,7 @@
<& pydoc.myt:obj_doc, obj=schema &>
<& pydoc.myt:obj_doc, obj=engine, classes=[engine.SQLEngine, engine.ResultProxy, engine.RowProxy] &>
-<& pydoc.myt:obj_doc, obj=sql, classes=[sql.Compiled, sql.ClauseElement, sql.TableClause, sql.ColumnClause] &>
+<& pydoc.myt:obj_doc, obj=sql, classes=[sql.ClauseParameters, sql.Compiled, sql.ClauseElement, sql.TableClause, sql.ColumnClause] &>
<& pydoc.myt:obj_doc, obj=pool, classes=[pool.DBProxy, pool.Pool, pool.QueuePool, pool.SingletonThreadPool] &>
<& pydoc.myt:obj_doc, obj=mapping &>
<& pydoc.myt:obj_doc, obj=mapping.objectstore, classes=[mapping.objectstore.Session, mapping.objectstore.Session.SessionTrans, mapping.objectstore.UnitOfWork] &>
diff --git a/doc/build/content/document_base.myt b/doc/build/content/document_base.myt
index a8b0910158..f37857e25f 100644
--- a/doc/build/content/document_base.myt
+++ b/doc/build/content/document_base.myt
@@ -23,7 +23,7 @@
onepage='documentation'
index='index'
title='SQLAlchemy Documentation'
- version = '0.1.3'
+ version = '0.1.4'
%attr>
<%method title>
diff --git a/doc/build/content/metadata.myt b/doc/build/content/metadata.myt
index e35d205d77..5b03ee647b 100644
--- a/doc/build/content/metadata.myt
+++ b/doc/build/content/metadata.myt
@@ -107,15 +107,11 @@
>>> othertable is news_articles
True
&>
-
-
-
&>
<&|doclib.myt:item, name="creating", description="Creating and Dropping Database Tables" &>
One level below using a DBProxy to make transparent pools is creating the pool yourself. The pool module comes with two implementations of connection pools: QueuePool and SingletonThreadPool. While QueuePool uses Queue.Queue to provide connections, SingletonThreadPool provides a single per-thread connection which SQLite requires.
+ +Constructing your own pool involves passing a callable used to create a connection. Through this method, custom connection schemes can be made, such as a connection that automatically executes some initialization commands to start. The options from the previous section can be used as they apply to QueuePool or SingletonThreadPool.
+ <&|formatting.myt:code, title="Plain QueuePool"&> + import sqlalchemy.pool as pool + import psycopg2 + + def getconn(): + c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') + # execute an initialization function on the connection before returning + c.cursor.execute("setup_encodings()") + return c + + p = pool.QueuePool(getconn, max_overflow=10, pool_size=5, use_threadlocal=True) + &> + + <&|formatting.myt:code, title="SingletonThreadPool"&> + import sqlalchemy.pool as pool + import sqlite + + def getconn(): + return sqlite.connect(filename='myfile.db') + + # SQLite connections require the SingletonThreadPool + p = pool.SingletonThreadPool(getconn) + &> + + &> &> \ No newline at end of file diff --git a/lib/sqlalchemy/ansisql.py b/lib/sqlalchemy/ansisql.py index 7f95cd392f..b039b346be 100644 --- a/lib/sqlalchemy/ansisql.py +++ b/lib/sqlalchemy/ansisql.py @@ -118,7 +118,8 @@ class ANSICompiler(sql.Compiled): objects compiled within this object. The output is dependent on the paramstyle of the DBAPI being used; if a named style, the return result will be a dictionary with keynames matching the compiled statement. If a positional style, the output - will be a list corresponding to the bind positions in the compiled statement. + will be a list, with an iterator that will return parameter + values in an order corresponding to the bind positions in the compiled statement. for an executemany style of call, this method should be called for each element in the list of parameter groups that will ultimately be executed. 
@@ -129,32 +130,23 @@ class ANSICompiler(sql.Compiled): bindparams = {} bindparams.update(params) + d = sql.ClauseParameters(self.engine) if self.positional: - d = OrderedDict() for k in self.positiontup: b = self.binds[k] - if self.engine is not None: - d[k] = b.typeprocess(b.value, self.engine) - else: - d[k] = b.value + d.set_parameter(k, b.value, b) else: - d = {} for b in self.binds.values(): - if self.engine is not None: - d[b.key] = b.typeprocess(b.value, self.engine) - else: - d[b.key] = b.value + d.set_parameter(b.key, b.value, b) for key, value in bindparams.iteritems(): try: b = self.binds[key] except KeyError: continue - if self.engine is not None: - d[b.key] = b.typeprocess(value, self.engine) - else: - d[b.key] = value + d.set_parameter(b.key, value, b) + #print "FROM", params, "TO", d return d def get_named_params(self, parameters): diff --git a/lib/sqlalchemy/databases/firebird.py b/lib/sqlalchemy/databases/firebird.py index 4dd4aa2a67..7d5cfed117 100644 --- a/lib/sqlalchemy/databases/firebird.py +++ b/lib/sqlalchemy/databases/firebird.py @@ -176,19 +176,8 @@ class FBSQLEngine(ansisql.ANSISQLEngine): return self.context.last_inserted_ids def pre_exec(self, proxy, compiled, parameters, **kwargs): - # this is just an assertion that all the primary key columns in an insert statement - # have a value set up, or have a default generator ready to go - if getattr(compiled, "isinsert", False): - if isinstance(parameters, list): - plist = parameters - else: - plist = [parameters] - for param in plist: - for primary_key in compiled.statement.table.primary_key: - if not param.has_key(primary_key.key) or param[primary_key.key] is None: - if primary_key.default is None: - raise "Column '%s.%s': Firebird primary key columns require a default value or a schema.Sequence to create ids" % (primary_key.table.name, primary_key.name) - + pass + def _executemany(self, c, statement, parameters): rowcount = 0 for param in parameters: diff --git 
a/lib/sqlalchemy/databases/mysql.py b/lib/sqlalchemy/databases/mysql.py index 8b262877c2..c55da97cb0 100644 --- a/lib/sqlalchemy/databases/mysql.py +++ b/lib/sqlalchemy/databases/mysql.py @@ -134,7 +134,7 @@ class MySQLEngine(ansisql.ANSISQLEngine): def __init__(self, opts, module = None, **params): if module is None: self.module = mysql - self.opts = opts or {} + self.opts = self._translate_connect_args(('host', 'db', 'user', 'passwd'), opts) ansisql.ANSISQLEngine.__init__(self, **params) def connect_args(self): diff --git a/lib/sqlalchemy/databases/oracle.py b/lib/sqlalchemy/databases/oracle.py index 8f80586808..21b478001e 100644 --- a/lib/sqlalchemy/databases/oracle.py +++ b/lib/sqlalchemy/databases/oracle.py @@ -90,7 +90,7 @@ def descriptor(): class OracleSQLEngine(ansisql.ANSISQLEngine): def __init__(self, opts, use_ansi = True, module = None, **params): self._use_ansi = use_ansi - self.opts = opts or {} + self.opts = self._translate_connect_args((None, 'dsn', 'user', 'password'), opts) if module is None: self.module = cx_Oracle else: @@ -181,18 +181,7 @@ order by UCC.CONSTRAINT_NAME""",{'table_name' : table.name.upper()}) return self.context.last_inserted_ids def pre_exec(self, proxy, compiled, parameters, **kwargs): - # this is just an assertion that all the primary key columns in an insert statement - # have a value set up, or have a default generator ready to go - if getattr(compiled, "isinsert", False): - if isinstance(parameters, list): - plist = parameters - else: - plist = [parameters] - for param in plist: - for primary_key in compiled.statement.table.primary_key: - if not param.has_key(primary_key.key) or param[primary_key.key] is None: - if primary_key.default is None: - raise "Column '%s.%s': Oracle primary key columns require a default value or a schema.Sequence to create ids" % (primary_key.table.name, primary_key.name) + pass def _executemany(self, c, statement, parameters): rowcount = 0 diff --git a/lib/sqlalchemy/databases/postgres.py 
b/lib/sqlalchemy/databases/postgres.py index db20b636c3..72d4260127 100644 --- a/lib/sqlalchemy/databases/postgres.py +++ b/lib/sqlalchemy/databases/postgres.py @@ -181,7 +181,7 @@ class PGSQLEngine(ansisql.ANSISQLEngine): self.version = 1 except: self.version = 1 - self.opts = opts or {} + self.opts = self._translate_connect_args(('host', 'database', 'user', 'password'), opts) if self.opts.has_key('port'): if self.version == 2: self.opts['port'] = int(self.opts['port']) diff --git a/lib/sqlalchemy/engine.py b/lib/sqlalchemy/engine.py index 269402f81d..e44e0a9509 100644 --- a/lib/sqlalchemy/engine.py +++ b/lib/sqlalchemy/engine.py @@ -203,6 +203,25 @@ class SQLEngine(schema.SchemaEngine): self._figure_paramstyle() self.logger = logger or util.Logger(origin='engine') + def _translate_connect_args(self, names, args): + """translates a dictionary of connection arguments to those used by a specific dbapi. + the names parameter is a tuple of argument names in the form ('host', 'database', 'user', 'password') + where the given strings match the corresponding argument names for the dbapi. Will return a dictionary + with the dbapi-specific parameters, the generic ones removed, and any additional parameters still remaining, + from the dictionary represented by args. 
Will return a blank dictionary if args is null.""" + if args is None: + return {} + a = args.copy() + standard_names = [('host','hostname'), ('database', 'dbname'), ('user', 'username'), ('password', 'passwd', 'pw')] + for n in names: + sname = standard_names.pop(0) + if n is None: + continue + for sn in sname: + if sn != n and a.has_key(sn): + a[n] = a[sn] + del a[sn] + return a def _get_ischema(self): # We use a property for ischema so that the accessor # creation only happens as needed, since otherwise we @@ -563,7 +582,6 @@ class SQLEngine(schema.SchemaEngine): parameters = [compiled.get_params(**m) for m in parameters] else: parameters = compiled.get_params(**parameters) - def proxy(statement=None, parameters=None): if statement is None: return cursor diff --git a/lib/sqlalchemy/mapping/mapper.py b/lib/sqlalchemy/mapping/mapper.py index 554b2d5b42..a77c2db12f 100644 --- a/lib/sqlalchemy/mapping/mapper.py +++ b/lib/sqlalchemy/mapping/mapper.py @@ -651,8 +651,8 @@ class Mapper(object): for c in table.c: if c.primary_key or not params.has_key(c.name): continue - if self._getattrbycolumn(obj, c) != params[c.name]: - self._setattrbycolumn(obj, c, params[c.name]) + if self._getattrbycolumn(obj, c) != params.get_original(c.name): + self._setattrbycolumn(obj, c, params.get_original(c.name)) def delete_obj(self, objects, uow): """called by a UnitOfWork object to delete objects, which involves a diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py index 5cb9f20430..756c03b6ef 100644 --- a/lib/sqlalchemy/schema.py +++ b/lib/sqlalchemy/schema.py @@ -267,7 +267,7 @@ class Column(sql.ColumnClause, SchemaItem): name will all be included in the index, in the order of their creation. - unique=None : True or undex name. Indicates that this column is + unique=None : True or index name. Indicates that this column is indexed in a unique index . Pass true to autogenerate the index name. Pass a string to specify the index name. 
Multiple columns that specify the same index name will all be included in the index, in the diff --git a/lib/sqlalchemy/sql.py b/lib/sqlalchemy/sql.py index 89b4b55854..4eaf33e00a 100644 --- a/lib/sqlalchemy/sql.py +++ b/lib/sqlalchemy/sql.py @@ -232,6 +232,27 @@ def _is_literal(element): def is_column(col): return isinstance(col, ColumnElement) +class ClauseParameters(util.OrderedDict): + """represents a dictionary/iterator of bind parameter key names/values. Includes parameters compiled with a Compiled object as well as additional arguments passed to the Compiled object's get_params() method. Parameter values will be converted as per the TypeEngine objects present in the bind parameter objects. The non-converted value can be retrieved via the get_original method. For Compiled objects that compile positional parameters, the values() iteration of the object will return the parameter values in the correct order.""" + def __init__(self, engine=None): + super(ClauseParameters, self).__init__(self) + self.engine = engine + self.binds = {} + def set_parameter(self, key, value, bindparam): + self[key] = value + self.binds[key] = bindparam + def get_original(self, key): + return super(ClauseParameters, self).__getitem__(key) + def __getitem__(self, key): + v = super(ClauseParameters, self).__getitem__(key) + if self.engine is not None and self.binds.has_key(key): + v = self.binds[key].typeprocess(v, self.engine) + return v + def values(self): + return [self[key] for key in self] + def get_original_dict(self): + return self.copy() + class ClauseVisitor(object): """Defines the visiting of ClauseElements.""" def visit_column(self, column):pass @@ -779,6 +800,8 @@ class Function(ClauseList, ColumnElement): clause = BindParamClause(self.name, clause, shortname=self.name, type=None) self.clauses.append(clause) def _process_from_dict(self, data, asfrom): + super(Function, self)._process_from_dict(data, asfrom) + # this helps a Select object get the engine from us 
data.setdefault(self, self) def copy_container(self): clauses = [clause.copy_container() for clause in self.clauses] diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index 96e6f1edbd..89fd3bd2c7 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -91,12 +91,12 @@ class String(TypeEngine): class Unicode(String): def convert_bind_param(self, value, engine): - if isinstance(value, unicode): + if value is not None and isinstance(value, unicode): return value.encode(engine.encoding) else: return value def convert_result_value(self, value, engine): - if not isinstance(value, unicode): + if value is not None and not isinstance(value, unicode): return value.decode(engine.encoding) else: return value diff --git a/lib/sqlalchemy/util.py b/lib/sqlalchemy/util.py index 2b522d5711..303cf56830 100644 --- a/lib/sqlalchemy/util.py +++ b/lib/sqlalchemy/util.py @@ -191,7 +191,8 @@ class OrderedDict(dict): def itervalues(self): return iter([self[key] for key in self._list]) - def iterkeys(self): return self.__iter__() + def iterkeys(self): + return self.__iter__() def iteritems(self): return iter([(key, self[key]) for key in self.keys()]) diff --git a/setup.py b/setup.py index b8b9d1f56e..8df1eb2fbe 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ use_setuptools() from setuptools import setup, find_packages setup(name = "SQLAlchemy", - version = "0.1.3", + version = "0.1.4", description = "Database Abstraction Library", author = "Mike Bayer", author_email = "mike_mp@zzzcomputing.com", diff --git a/test/indexes.py b/test/indexes.py index d0cb1a131e..17c2e7dc67 100644 --- a/test/indexes.py +++ b/test/indexes.py @@ -68,7 +68,6 @@ class IndexTest(testbase.AssertMixin): def test_index_create_inline(self): """Test indexes defined with tables""" - testbase.db.echo = True capt = [] class dummy: pass diff --git a/test/mapper.py b/test/mapper.py index 81592df8c9..eff6566547 100644 --- a/test/mapper.py +++ b/test/mapper.py @@ -301,7 +301,7 @@ class 
PropertyTest(MapperSuperTest): au = AddressUser.mapper.get_by(user_name='jack') self.assert_(au.email_address == 'jack@gmail.com') - + class DeferredTest(MapperSuperTest): def testbasic(self): diff --git a/test/objectstore.py b/test/objectstore.py index affa32a5f8..0a02d6fb11 100644 --- a/test/objectstore.py +++ b/test/objectstore.py @@ -141,6 +141,29 @@ class SessionTest(AssertMixin): self.assert_(name_of(7) != name1, msg="user_name should not be %s" % name1) self.assert_(name_of(8) != name2, msg="user_name should not be %s" % name2) +class UnicodeTest(AssertMixin): + def setUpAll(self): + global uni_table + uni_table = Table('test', db, + Column('id', Integer, primary_key=True), + Column('txt', Unicode(50))).create() + + def tearDownAll(self): + uni_table.drop() + uni_table.deregister() + + def testbasic(self): + class Test(object): + pass + assign_mapper(Test, uni_table) + + txt = u"\u0160\u0110\u0106\u010c\u017d" + t1 = Test(id=1, txt = txt) + self.assert_(t1.txt == txt) + objectstore.commit() + self.assert_(t1.txt == txt) + + class PKTest(AssertMixin): def setUpAll(self): db.echo = False @@ -836,7 +859,6 @@ class SaveTest(AssertMixin): ) ] ) - return objects[2].keywords.append(k) dkid = objects[5].keywords[1].keyword_id del objects[5].keywords[1] diff --git a/test/select.py b/test/select.py index 47bc19515c..768827347e 100644 --- a/test/select.py +++ b/test/select.py @@ -278,11 +278,14 @@ FROM mytable, myothertable WHERE foo.id = foofoo(lala) AND datetime(foo) = Today def testliteral(self): self.runtest(select([literal("foo") + literal("bar")], from_obj=[table1]), - "SELECT :literal + :literal_1 FROM mytable", engine=db) + "SELECT :literal + :literal_1 FROM mytable") def testfunction(self): self.runtest(func.lala(3, 4, literal("five"), table1.c.myid) * table2.c.otherid, - "lala(:lala, :lala_1, :literal, mytable.myid) * myothertable.otherid", engine=db) + "lala(:lala, :lala_1, :literal, mytable.myid) * myothertable.otherid") + + 
self.runtest(select([func.count(table1.c.myid)]), + "SELECT count(mytable.myid) FROM mytable") def testjoin(self): self.runtest( diff --git a/test/testbase.py b/test/testbase.py index bddb940bec..f10cce681a 100644 --- a/test/testbase.py +++ b/test/testbase.py @@ -40,7 +40,7 @@ def parse_argv(): elif DBTYPE == 'postgres': db_uri = 'postgres://database=test&port=5432&host=127.0.0.1&user=scott&password=tiger' elif DBTYPE == 'mysql': - db_uri = 'mysql://db=test&host=127.0.0.1&user=scott&passwd=tiger' + db_uri = 'mysql://database=test&host=127.0.0.1&user=scott&password=tiger' elif DBTYPE == 'oracle': db_uri = 'oracle://user=scott&password=tiger' -- 2.47.2