]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
- move out maxdb
authorMike Bayer <mike_mp@zzzcomputing.com>
Thu, 18 Oct 2012 22:24:15 +0000 (18:24 -0400)
committerMike Bayer <mike_mp@zzzcomputing.com>
Thu, 18 Oct 2012 22:24:15 +0000 (18:24 -0400)
- begin consolidating docs for dialects to be more self contained
- add a separate section for "external" dialects
- not sure how we're going to go with this yet.

36 files changed:
CHANGES
doc/build/core/engines.rst
doc/build/dialects/drizzle.rst
doc/build/dialects/index.rst
doc/build/dialects/maxdb.rst [deleted file]
doc/build/dialects/mysql.rst
doc/build/dialects/postgresql.rst
doc/build/index.rst
doc/build/static/docs.css
lib/sqlalchemy/dialects/__init__.py
lib/sqlalchemy/dialects/drizzle/base.py
lib/sqlalchemy/dialects/firebird/base.py
lib/sqlalchemy/dialects/firebird/fdb.py
lib/sqlalchemy/dialects/firebird/kinterbasdb.py
lib/sqlalchemy/dialects/maxdb/__init__.py [deleted file]
lib/sqlalchemy/dialects/maxdb/base.py [deleted file]
lib/sqlalchemy/dialects/maxdb/sapdb.py [deleted file]
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mysql/base.py
lib/sqlalchemy/dialects/mysql/gaerdbms.py
lib/sqlalchemy/dialects/mysql/mysqlconnector.py
lib/sqlalchemy/dialects/mysql/mysqldb.py
lib/sqlalchemy/dialects/mysql/oursql.py
lib/sqlalchemy/dialects/mysql/pymysql.py
lib/sqlalchemy/dialects/mysql/pyodbc.py
lib/sqlalchemy/dialects/mysql/zxjdbc.py
lib/sqlalchemy/dialects/oracle/base.py
lib/sqlalchemy/dialects/postgresql/base.py
lib/sqlalchemy/dialects/postgresql/pg8000.py
lib/sqlalchemy/dialects/postgresql/psycopg2.py
lib/sqlalchemy/dialects/postgresql/pypostgresql.py
lib/sqlalchemy/dialects/postgresql/zxjdbc.py
lib/sqlalchemy/dialects/sqlite/base.py
lib/sqlalchemy/orm/session.py
lib/sqlalchemy/orm/unitofwork.py
test/dialect/test_maxdb.py [deleted file]

diff --git a/CHANGES b/CHANGES
index 084ab4eadd8786325e805c37fbb3706fa7240de1..89f62ffd84689e9b353f8d1e9662f78b65b4b372 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -743,6 +743,14 @@ underneath "0.7.xx".
     ready for general use yet, however
     it does have *extremely* rudimental
     functionality now.
+    https://bitbucket.org/zzzeek/sqlalchemy-access
+
+- maxdb
+  - [moved] The MaxDB dialect, which hasn't been
+    functional for several years, is
+    moved out to a pending bitbucket project,
+    https://bitbucket.org/zzzeek/sqlalchemy-maxdb.
+
 
 - sqlite
   - [feature] the SQLite date and time types
index b6192ac6984ec80e39ed79048c8748b656ffe83e..2a8cfdd5ee65453cd2132cd7b67e6938e43b1b30 100644 (file)
@@ -49,96 +49,8 @@ SQLAlchemy includes many :class:`~sqlalchemy.engine.base.Dialect` implementation
 backends.   Dialects for the most common databases are included with SQLAlchemy; a handful
 of others require an additional install of a separate dialect.
 
-Those dialects which are included with SQLAlchemy are described under the section :ref:`dialect_toplevel`.
-All dialects additionally require that an appropriate DBAPI driver is installed.
-
-The table below summarizes the state of DBAPI support in this version of SQLAlchemy.  The values
-translate as:
-
-* yes / Python platform - The SQLAlchemy dialect is mostly or fully operational on the target platform.
-* yes / OS platform - The DBAPI supports that platform.
-* no / Python platform - The DBAPI does not support that platform, or there is no SQLAlchemy dialect support.
-* no / OS platform - The DBAPI does not support that platform.
-* partial - the DBAPI is partially usable on the target platform but has major unresolved issues.
-* development - a development version of the dialect exists, but is not yet usable.
-* thirdparty - the dialect itself is maintained by a third party, who should be consulted for
-  information on current support.
-* \* - indicates the given DBAPI is the "default" for SQLAlchemy, i.e. when just the database name is specified
-
-===============================================================  ===========================  ===========  ===========   ===========  =================  ============
-Driver                                                           Connect string               Py2K         Py3K          Jython       Unix               Windows
-===============================================================  ===========================  ===========  ===========   ===========  =================  ============
-**DB2/Informix IDS**
-ibm-db_                                                          See `ibm-db`_                unknown      unknown       unknown      unknown            unknown
-**Drizzle** :ref:`(docs) <drizzle_toplevel>`
-mysql-python_                                                    ``drizzle+mysqldb``\*        yes          development   no           yes                yes
-**Firebird / Interbase** :ref:`(docs) <firebird_toplevel>`
-kinterbasdb_                                                     ``firebird+kinterbasdb``\*   yes          development   no           yes                yes
-**Informix** :ref:`(docs) <informix_toplevel>`
-informixdb_                                                      ``informix+informixdb``\*    yes          development   no           unknown            unknown
-**MaxDB** :ref:`(docs) <maxdb_toplevel>`
-sapdb_                                                           ``maxdb+sapdb``\*            development  development   no           yes                unknown
-**Microsoft Access**
-pyodbc_                                                          See `sqlalchemy-access`_     development  development   no           unknown            yes
-**Microsoft SQL Server** :ref:`(docs) <mssql_toplevel>`
-adodbapi_                                                        ``mssql+adodbapi``           development  development   no           no                 yes
-`jTDS JDBC Driver`_                                              ``mssql+zxjdbc``             no           no            development  yes                yes
-mxodbc_                                                          ``mssql+mxodbc``             yes          development   no           yes with FreeTDS_  yes
-pyodbc_                                                          ``mssql+pyodbc``\*           yes          development   no           yes with FreeTDS_  yes
-pymssql_                                                         ``mssql+pymssql``            yes          development   no           yes                yes
-**MySQL** :ref:`(docs) <mysql_toplevel>`
-`MySQL Connector/J`_                                             ``mysql+zxjdbc``             no           no            yes          yes                yes
-`MySQL Connector/Python`_                                        ``mysql+mysqlconnector``     yes          development   no           yes                yes
-mysql-python_                                                    ``mysql+mysqldb``\*          yes          development   no           yes                yes
-OurSQL_                                                          ``mysql+oursql``             yes          yes           no           yes                yes
-pymysql_                                                         ``mysql+pymysql``            yes          development   no           yes                yes
-rdbms_ (Google Appengine)                                        ``mysql+gaerdbms``           yes          development   no           no                 no
-**Oracle** :ref:`(docs) <oracle_toplevel>`
-cx_oracle_                                                       ``oracle+cx_oracle``\*       yes          development   no           yes                yes
-`Oracle JDBC Driver`_                                            ``oracle+zxjdbc``            no           no            yes          yes                yes
-**Postgresql** :ref:`(docs) <postgresql_toplevel>`
-pg8000_                                                          ``postgresql+pg8000``        yes          yes           no           yes                yes
-`PostgreSQL JDBC Driver`_                                        ``postgresql+zxjdbc``        no           no            yes          yes                yes
-psycopg2_                                                        ``postgresql+psycopg2``\*    yes          yes           no           yes                yes
-pypostgresql_                                                    ``postgresql+pypostgresql``  no           yes           no           yes                yes
-**SQLite** :ref:`(docs) <sqlite_toplevel>`
-pysqlite_                                                        ``sqlite+pysqlite``\*        yes          yes           no           yes                yes
-sqlite3_                                                         ``sqlite+pysqlite``\*        yes          yes           no           yes                yes
-**Sybase ASE** :ref:`(docs) <sybase_toplevel>`
-mxodbc_                                                          ``sybase+mxodbc``            development  development   no           yes                yes
-pyodbc_                                                          ``sybase+pyodbc``\*          partial      development   no           unknown            unknown
-python-sybase_                                                   ``sybase+pysybase``          yes [1]_     development   no           yes                yes
-===============================================================  ===========================  ===========  ===========   ===========  =================  ============
-
-.. [1] The Sybase dialect currently lacks the ability to reflect tables.
-.. _psycopg2: http://www.initd.org/
-.. _pg8000: http://pybrary.net/pg8000/
-.. _sqlalchemy-access: https://bitbucket.org/zzzeek/sqlalchemy-access
-.. _pypostgresql: http://python.projects.postgresql.org/
-.. _mysql-python: http://sourceforge.net/projects/mysql-python
-.. _MySQL Connector/Python: https://launchpad.net/myconnpy
-.. _OurSQL: http://packages.python.org/oursql/
-.. _pymysql: http://code.google.com/p/pymysql/
-.. _rdbms: https://developers.google.com/cloud-sql/docs/developers_guide_python
-.. _PostgreSQL JDBC Driver: http://jdbc.postgresql.org/
-.. _sqlite3: http://docs.python.org/library/sqlite3.html
-.. _pysqlite: http://pypi.python.org/pypi/pysqlite/
-.. _MySQL Connector/J: http://dev.mysql.com/downloads/connector/j/
-.. _cx_Oracle: http://cx-oracle.sourceforge.net/
-.. _Oracle JDBC Driver: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html
-.. _kinterbasdb:  http://firebirdsql.org/index.php?op=devel&sub=python
-.. _pyodbc: http://code.google.com/p/pyodbc/
-.. _mxodbc: http://www.egenix.com/products/python/mxODBC/
-.. _FreeTDS: http://www.freetds.org/
-.. _adodbapi: http://adodbapi.sourceforge.net/
-.. _pymssql: http://code.google.com/p/pymssql/
-.. _jTDS JDBC Driver: http://jtds.sourceforge.net/
-.. _ibm-db: http://code.google.com/p/ibm-db/
-.. _informixdb: http://informixdb.sourceforge.net/
-.. _sapdb: http://www.sapdb.org/sapdbapi.html
-.. _python-sybase: http://python-sybase.sourceforge.net/
-
-Further detail on dialects is available at :ref:`dialect_toplevel`.
+See the section :ref:`dialect_toplevel` for information on the various backends available.
+
 
 
 .. _create_engine_args:
index 114c6f9c94a8bad87e809e3f7fab0af8fce9051a..ec0af93ce0a324cc85aeaafe26d98c638d1b7ede 100644 (file)
@@ -16,7 +16,7 @@ valid with Drizzle are importable from the top level dialect::
             DECIMAL, DOUBLE, ENUM, FLOAT, INT, INTEGER,
             NUMERIC, TEXT, TIME, TIMESTAMP, VARBINARY, VARCHAR
 
-Types which are specific to Drizzle, or have Drizzle-specific 
+Types which are specific to Drizzle, or have Drizzle-specific
 construction arguments, are as follows:
 
 .. currentmodule:: sqlalchemy.dialects.drizzle
@@ -70,6 +70,8 @@ construction arguments, are as follows:
     :show-inheritance:
 
 
+.. _drizzle_mysqldb:
+
 MySQL-Python Notes
 --------------------
 
index 2eb770bdec1e41bdb816e8ee3f9e6d5b2b01649e..7f87439a0e5513d289f00a1a742e4a5c000713fd 100644 (file)
@@ -4,13 +4,13 @@ Dialects
 ========
 
 The **dialect** is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases.
-A compatibility chart of supported backends can be found at :ref:`supported_dbapis`.  The sections that
-follow contain reference documentation and notes specific to the usage of each backend, as well as notes
+The sections that follow contain reference documentation and notes specific to the usage of each backend, as well as notes
 for the various DBAPIs.
 
-Note that not all backends are fully ported and tested with
-current versions of SQLAlchemy. The compatibility chart
-should be consulted to check for current support level.
+All dialects require that an appropriate DBAPI driver is installed.
+
+Included Dialects
+-----------------
 
 .. toctree::
     :maxdepth: 1
@@ -19,7 +19,6 @@ should be consulted to check for current support level.
     drizzle
     firebird
     informix
-    maxdb
     mssql
     mysql
     oracle
@@ -27,5 +26,26 @@ should be consulted to check for current support level.
     sqlite
     sybase
 
+.. _external_toplevel:
+
+External Dialects
+-----------------
+
+.. note::
+
+    As of SQLAlchemy 0.8, several dialects have been moved to external
+    projects, and dialects for new databases will also be published
+    as external projects.   The rationale here is to keep the base
+    SQLAlchemy install and test suite from growing inordinately large.
+
+    The "classic" dialects such as SQLite, MySQL, Postgresql, Oracle,
+    SQL Server, Firebird will remain in the Core for the time being.
+
+Current external dialect projects for SQLAlchemy include:
+
+* `sqlalchemy-access <https://bitbucket.org/zzzeek/sqlalchemy-access>`_ - driver for Microsoft Access.
+* `sqlalchemy-akiban <https://github.com/zzzeek/sqlalchemy_akiban>`_ - driver and ORM extensions for the `Akiban <http://www.akiban.com>`_ database.
+* `sqlalchemy-cubrid <https://bitbucket.org/zzzeek/sqlalchemy-cubrid>`_ - driver for the CUBRID database.
+* `sqlalchemy-maxdb <https://bitbucket.org/zzzeek/sqlalchemy-maxdb>`_ - driver for the MaxDB database.
 
 
diff --git a/doc/build/dialects/maxdb.rst b/doc/build/dialects/maxdb.rst
deleted file mode 100644 (file)
index c4f8a80..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _maxdb_toplevel:
-
-MaxDB
-=====
-
-.. automodule:: sqlalchemy.dialects.maxdb.base
index 916df525eb476b88feaa1e0598d93608385d19c1..2110e375afed08f6e60eeec2f81ea92cfceda901 100644 (file)
@@ -18,7 +18,7 @@ valid with MySQL are importable from the top level dialect::
             NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
             TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR
 
-Types which are specific to MySQL, or have MySQL-specific 
+Types which are specific to MySQL, or have MySQL-specific
 construction arguments, are as follows:
 
 .. currentmodule:: sqlalchemy.dialects.mysql
@@ -155,38 +155,51 @@ construction arguments, are as follows:
     :members: __init__
     :show-inheritance:
 
+.. _mysqldb:
 
-MySQL-Python Notes
+MySQL-Python
 --------------------
 
 .. automodule:: sqlalchemy.dialects.mysql.mysqldb
 
-OurSQL Notes
+.. _oursql:
+
+OurSQL
 --------------
 
 .. automodule:: sqlalchemy.dialects.mysql.oursql
 
-pymysql Notes
+.. _pymysql:
+
+pymysql
 -------------
 
 .. automodule:: sqlalchemy.dialects.mysql.pymysql
 
-MySQL-Connector Notes
+.. _mysqlconnector:
+
+MySQL-Connector
 ----------------------
 
 .. automodule:: sqlalchemy.dialects.mysql.mysqlconnector
 
-Google App Engine Notes
+.. _gaerdbms:
+
+Google App Engine
 -----------------------
 
 .. automodule:: sqlalchemy.dialects.mysql.gaerdbms
 
-pyodbc Notes
+.. _mysql_pyodbc:
+
+pyodbc
 --------------
 
 .. automodule:: sqlalchemy.dialects.mysql.pyodbc
 
-zxjdbc Notes
+.. _mysql_zxjdbc:
+
+zxjdbc
 --------------
 
 .. automodule:: sqlalchemy.dialects.mysql.zxjdbc
index 5a2f3343a0b40d433880c930a51c9bdde5877639..cf6f277f56ea394f926521eca50d0fd3a561d218 100644 (file)
@@ -69,24 +69,31 @@ construction arguments, are as follows:
     :members: __init__
     :show-inheritance:
 
+.. _psycopg2:
 
-psycopg2 Notes
+psycopg2
 --------------
 
 .. automodule:: sqlalchemy.dialects.postgresql.psycopg2
 
 
-py-postgresql Notes
+.. _pypostgresql:
+
+py-postgresql
 --------------------
 
 .. automodule:: sqlalchemy.dialects.postgresql.pypostgresql
 
-pg8000 Notes
+.. _pg8000:
+
+pg8000
 --------------
 
 .. automodule:: sqlalchemy.dialects.postgresql.pg8000
 
-zxjdbc Notes
+.. _zxjdbc:
+
+zxjdbc
 --------------
 
 .. automodule:: sqlalchemy.dialects.postgresql.zxjdbc
index 59521b7e19ed2613460d32c9b674394ecedf58b3..01e62a42093cef9457860e3d89675980ee60dd73 100644 (file)
@@ -102,18 +102,8 @@ are documented here.  In contrast to the ORM's domain-centric mode of usage, the
 Dialect Documentation
 ======================
 
-The **dialect** is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases.  This section describes notes, options, and
-usage patterns regarding individual dialects.
-
-:doc:`dialects/drizzle` |
-:doc:`dialects/firebird` |
-:doc:`dialects/informix` |
-:doc:`dialects/maxdb` |
-:doc:`dialects/mssql` |
-:doc:`dialects/mysql` |
-:doc:`dialects/oracle` |
-:doc:`dialects/postgresql` |
-:doc:`dialects/sqlite` |
-:doc:`dialects/sybase`
+The **dialect** is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases.
+This section describes notes, options, and usage patterns regarding individual dialects.
 
+:doc:`Index of all Dialects <dialects/index>`
 
index be13e5a75413c23d05d1ab8ca93a520fa126fcfb..a5a0cba8d1d6e6a801f13ed249030bfb9c50a586 100644 (file)
@@ -226,13 +226,20 @@ a.headerlink:hover {
 #docs-body h1 {
   /* hide the <h1> for each content section. */
   display:none;
-  font-size:1.8em;
+  font-size:2.0em;
 }
 
 #docs-body h2 {
-  font-size:1.6em;
+  font-size:1.8em;
+  border-top:1px solid;
+  /*border-bottom:1px solid;*/
+  padding-top:20px;
 }
 
+#sqlalchemy-documentation h2 {
+  border-top:none;
+  padding-top:0;
+}
 #docs-body h3 {
   font-size:1.4em;
 }
index fa1788e78bc32bb6f748711478a298ea406d71f0..2d400aa60d0d6bec67c3f2c749f166d615c2729f 100644 (file)
@@ -8,7 +8,6 @@ __all__ = (
     'drizzle',
     'firebird',
 #    'informix',
-#    'maxdb',
     'mssql',
     'mysql',
     'oracle',
index 858253f1c0a2daf446996b025ed648d8d0b8ee28..d32240c38249773c64b469562cff5c77e801a819 100644 (file)
@@ -16,10 +16,14 @@ the `Drizzle Documentation <http://docs.drizzle.org/index.html>`_.
 The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of
 the :doc:`SQLAlchemy MySQL <mysql>` documentation is also relevant.
 
-Connecting
-----------
+DBAPI Support
+-------------
 
-See the individual driver sections below for details on connecting.
+The following dialect/driver options are available:
+
+``drizzle://`` - uses mysqldb_
+
+``drizzle+mysqldb://`` - uses mysqldb_
 
 """
 
index 4a83f233256d9cfad74f430a98fd6329527f2f0c..b990400389889d88956d12e69a42c174b8470615 100644 (file)
@@ -7,10 +7,19 @@
 """
 Support for the Firebird database.
 
-Connectivity is usually supplied via the kinterbasdb_ DBAPI module.
+DBAPI Support
+-------------
 
-Dialects
-~~~~~~~~
+The following dialect/driver options are available:
+
+``firebird://`` - uses kinterbasdb_
+
+``firebird+kinterbasdb://`` - uses kinterbasdb_
+
+``firebird+fdb://`` - uses fdb_
+
+Firebird Dialects
+-----------------
 
 Firebird offers two distinct dialects_ (not to be confused with a
 SQLAlchemy ``Dialect``):
@@ -27,7 +36,7 @@ support for dialect 1 is not well tested and probably has
 incompatibilities.
 
 Locking Behavior
-~~~~~~~~~~~~~~~~
+----------------
 
 Firebird locks tables aggressively.  For this reason, a DROP TABLE may
 hang until other transactions are released.  SQLAlchemy does its best
@@ -47,7 +56,7 @@ The above use case can be alleviated by calling ``first()`` on the
 all remaining cursor/connection resources.
 
 RETURNING support
-~~~~~~~~~~~~~~~~~
+-----------------
 
 Firebird 2.0 supports returning a result set from inserts, and 2.1
 extends that to deletes and updates. This is generically exposed by
index e6ca170f7739e460c73d238af845d5b22877aa24..3601b391b127adee74114f53f1563f12c588bbc0 100644 (file)
@@ -7,15 +7,12 @@
 """
 fdb is a kinterbasdb compatible DBAPI for Firebird.
 
-Usage is currently the same as that of kinterbasdb, with the exception
-of the connect string below.
-
 .. versionadded:: 0.8 - Support for the fdb Firebird driver.
 
-Status
-------
+DBAPI
+-----
 
-The fdb dialect is new and not yet tested (can't get fdb to build).
+http://pypi.python.org/pypi/fdb/
 
 Connecting
 -----------
@@ -24,6 +21,12 @@ Connect string format::
 
     firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
 
+Status
+------
+
+The fdb dialect is new and not yet tested (can't get fdb to build).
+
+
 """
 
 from .kinterbasdb import FBDialect_kinterbasdb
index 47160f5ea247927ae6182e569b83ff0c3ea50120..78fffd647156102039062e77a6bfd9dc12a873cb 100644 (file)
@@ -6,6 +6,11 @@
 
 """
 
+DBAPI
+-----
+
+http://firebirdsql.org/index.php?op=devel&sub=python
+
 Connecting
 -----------
 
diff --git a/lib/sqlalchemy/dialects/maxdb/__init__.py b/lib/sqlalchemy/dialects/maxdb/__init__.py
deleted file mode 100644 (file)
index c045f11..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# maxdb/__init__.py
-# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy.dialects.maxdb import base, sapdb
-
-base.dialect = sapdb.dialect
\ No newline at end of file
diff --git a/lib/sqlalchemy/dialects/maxdb/base.py b/lib/sqlalchemy/dialects/maxdb/base.py
deleted file mode 100644 (file)
index 03f7ef2..0000000
+++ /dev/null
@@ -1,1120 +0,0 @@
-# maxdb/base.py
-# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Support for the MaxDB database.
-
-.. note::
-
-    The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
-    pending development efforts to bring it up-to-date.
-
-Overview
---------
-
-The ``maxdb`` dialect is **experimental** and has only been tested on 7.6.03.007
-and 7.6.00.037.  Of these, **only 7.6.03.007 will work** with SQLAlchemy's ORM.
-The earlier version has severe ``LEFT JOIN`` limitations and will return
-incorrect results from even very simple ORM queries.
-
-Only the native Python DB-API is currently supported.  ODBC driver support
-is a future enhancement.
-
-Connecting
-----------
-
-The username is case-sensitive.  If you usually connect to the
-database with sqlcli and other tools in lower case, you likely need to
-use upper case for DB-API.
-
-Implementation Notes
---------------------
-
-With the 7.6.00.37 driver and Python 2.5, it seems that all DB-API
-generated exceptions are broken and can cause Python to crash.
-
-For 'somecol.in_([])' to work, the IN operator's generation must be changed
-to cast 'NULL' to a numeric, i.e. NUM(NULL).  The DB-API doesn't accept a
-bind parameter there, so that particular generation must inline the NULL value,
-which depends on [ticket:807].
-
-The DB-API is very picky about where bind params may be used in queries.
-
-Bind params for some functions (e.g. MOD) need type information supplied.
-The dialect does not yet do this automatically.
-
-Max will occasionally throw up 'bad sql, compile again' exceptions for
-perfectly valid SQL.  The dialect does not currently handle these, more
-research is needed.
-
-MaxDB 7.5 and Sap DB <= 7.4 reportedly do not support schemas.  A very
-slightly different version of this dialect would be required to support
-those versions, and can easily be added if there is demand.  Some other
-required components such as an Max-aware 'old oracle style' join compiler
-(thetas with (+) outer indicators) are already done and available for
-integration- email the devel list if you're interested in working on
-this.
-
-Versions tested: 7.6.03.07 and 7.6.00.37, native Python DB-API
-
-* MaxDB has severe limitations on OUTER JOINs, which are essential to ORM
-  eager loading. And rather than raise an error if a SELECT can't be serviced,
-  the database simply returns incorrect results.
-* Version 7.6.03.07 seems to JOIN properly, however the docs do not show the
-  OUTER restrictions being lifted (as of this writing), and no changelog is
-  available to confirm either. If you are using a different server version and
-  your tasks require the ORM or any semi-advanced SQL through the SQL layer,
-  running the SQLAlchemy test suite against your database is HIGHLY
-  recommended before you begin.
-* Version 7.6.00.37 is LHS/RHS sensitive in `FROM lhs LEFT OUTER JOIN rhs ON
-  lhs.col=rhs.col` vs `rhs.col=lhs.col`!
-* Version 7.6.00.37 is confused by `SELECT DISTINCT col as alias FROM t ORDER
-  BY col` - these aliased, DISTINCT, ordered queries need to be re-written to
-  order by the alias name.
-* Version 7.6.x supports creating a SAVEPOINT but not its RELEASE.
-* MaxDB supports autoincrement-style columns (DEFAULT SERIAL) and independent
-  sequences. When including a DEFAULT SERIAL column in an insert, 0 needs to
-  be inserted rather than NULL to generate a value.
-* MaxDB supports ANSI and "old Oracle style" theta joins with (+) outer join
-  indicators.
-* The SQLAlchemy dialect is schema-aware and probably won't function correctly
-  on server versions (pre-7.6?). Support for schema-less server versions could
-  be added if there's call.
-* ORDER BY is not supported in subqueries. LIMIT is not supported in
-  subqueries. In 7.6.00.37, TOP does work in subqueries, but without limit not
-  so useful. OFFSET does not work in 7.6 despite being in the docs. Row number
-  tricks in WHERE via ROWNO may be possible but it only seems to allow
-  less-than comparison!
-* Version 7.6.03.07 can't LIMIT if a derived table is in FROM: `SELECT * FROM
-  (SELECT * FROM a) LIMIT 2`
-* MaxDB does not support SQL's CAST and can only usefully cast two types.
-  There isn't much implicit type conversion, so be precise when creating
-  `PassiveDefaults` in DDL generation: `'3'` and `3` aren't the same.
-
-sapdb.dbapi
-^^^^^^^^^^^
-
-* As of 2007-10-22 the Python 2.4 and 2.5 compatible versions of the DB-API
-  are no longer available. A forum posting at SAP states that the Python
-  driver will be available again "in the future". The last release from MySQL
-  AB works if you can find it.
-* sequence.NEXTVAL skips every other value!
-* No rowcount for executemany()
-* If an INSERT into a table with a DEFAULT SERIAL column inserts the results
-  of a function `INSERT INTO t VALUES (LENGTH('foo'))`, the cursor won't have
-  the serial id. It needs to be manually yanked from tablename.CURRVAL.
-* Super-duper picky about where bind params can be placed. Not smart about
-  converting Python types for some functions, such as `MOD(5, ?)`.
-* LONG (text, binary) values in result sets are read-once. The dialect uses a
-  caching RowProxy when these types are present.
-* Connection objects seem like they want to be either `close()`d or garbage
-  collected, but not both. There's a warning issued but it seems harmless.
-
-
-"""
-import datetime
-import itertools
-import re
-
-from ... import exc, schema, sql, util, processors
-from ...sql import expression as sql_expr
-from ...sql import compiler, visitors
-from ...engine import result as _result, default, reflection
-from ... import types as sqltypes
-
-
-class _StringType(sqltypes.String):
-    _type = None
-
-    def __init__(self, length=None, encoding=None, **kw):
-        super(_StringType, self).__init__(length=length, **kw)
-        self.encoding = encoding
-
-    def bind_processor(self, dialect):
-        if self.encoding == 'unicode':
-            return None
-        else:
-            def process(value):
-                if isinstance(value, unicode):
-                    return value.encode(dialect.encoding)
-                else:
-                    return value
-            return process
-
-    def result_processor(self, dialect, coltype):
-        #XXX: this code is probably very slow and one should try (if at all
-        # possible) to determine the correct code path on a per-connection
-        # basis (ie, here in result_processor, instead of inside the processor
-        # function itself) and probably also use a few generic
-        # processors, or possibly per query (though there is no mechanism
-        # for that yet).
-        def process(value):
-            while True:
-                if value is None:
-                    return None
-                elif isinstance(value, unicode):
-                    return value
-                elif isinstance(value, str):
-                    if self.convert_unicode or dialect.convert_unicode:
-                        return value.decode(dialect.encoding)
-                    else:
-                        return value
-                elif hasattr(value, 'read'):
-                    # some sort of LONG, snarf and retry
-                    value = value.read(value.remainingLength())
-                    continue
-                else:
-                    # unexpected type, return as-is
-                    return value
-        return process
-
-
-class MaxString(_StringType):
-    _type = 'VARCHAR'
-
-
-class MaxUnicode(_StringType):
-    _type = 'VARCHAR'
-
-    def __init__(self, length=None, **kw):
-        kw['encoding'] = 'unicode'
-        super(MaxUnicode, self).__init__(length=length, **kw)
-
-
-class MaxChar(_StringType):
-    _type = 'CHAR'
-
-
-class MaxText(_StringType):
-    _type = 'LONG'
-
-    def __init__(self, length=None, **kw):
-        super(MaxText, self).__init__(length, **kw)
-
-    def get_col_spec(self):
-        spec = 'LONG'
-        if self.encoding is not None:
-            spec = ' '.join((spec, self.encoding))
-        elif self.convert_unicode:
-            spec = ' '.join((spec, 'UNICODE'))
-
-        return spec
-
-
-class MaxNumeric(sqltypes.Numeric):
-    """The FIXED (also NUMERIC, DECIMAL) data type."""
-
-    def __init__(self, precision=None, scale=None, **kw):
-        kw.setdefault('asdecimal', True)
-        super(MaxNumeric, self).__init__(scale=scale, precision=precision,
-                                         **kw)
-
-    def bind_processor(self, dialect):
-        return None
-
-
-class MaxTimestamp(sqltypes.DateTime):
-    def bind_processor(self, dialect):
-        def process(value):
-            if value is None:
-                return None
-            elif isinstance(value, basestring):
-                return value
-            elif dialect.datetimeformat == 'internal':
-                ms = getattr(value, 'microsecond', 0)
-                return value.strftime("%Y%m%d%H%M%S" + ("%06u" % ms))
-            elif dialect.datetimeformat == 'iso':
-                ms = getattr(value, 'microsecond', 0)
-                return value.strftime("%Y-%m-%d %H:%M:%S." + ("%06u" % ms))
-            else:
-                raise exc.InvalidRequestError(
-                    "datetimeformat '%s' is not supported." % (
-                    dialect.datetimeformat,))
-        return process
-
-    def result_processor(self, dialect, coltype):
-        if dialect.datetimeformat == 'internal':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.datetime(
-                        *[int(v)
-                          for v in (value[0:4], value[4:6], value[6:8],
-                                    value[8:10], value[10:12], value[12:14],
-                                    value[14:])])
-        elif dialect.datetimeformat == 'iso':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.datetime(
-                        *[int(v)
-                          for v in (value[0:4], value[5:7], value[8:10],
-                                    value[11:13], value[14:16], value[17:19],
-                                    value[20:])])
-        else:
-            raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." %
-                dialect.datetimeformat)
-        return process
-
-
-class MaxDate(sqltypes.Date):
-    def bind_processor(self, dialect):
-        def process(value):
-            if value is None:
-                return None
-            elif isinstance(value, basestring):
-                return value
-            elif dialect.datetimeformat == 'internal':
-                return value.strftime("%Y%m%d")
-            elif dialect.datetimeformat == 'iso':
-                return value.strftime("%Y-%m-%d")
-            else:
-                raise exc.InvalidRequestError(
-                    "datetimeformat '%s' is not supported." % (
-                    dialect.datetimeformat,))
-        return process
-
-    def result_processor(self, dialect, coltype):
-        if dialect.datetimeformat == 'internal':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.date(int(value[0:4]), int(value[4:6]),
-                                         int(value[6:8]))
-        elif dialect.datetimeformat == 'iso':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.date(int(value[0:4]), int(value[5:7]),
-                                         int(value[8:10]))
-        else:
-            raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." %
-                dialect.datetimeformat)
-        return process
-
-
-class MaxTime(sqltypes.Time):
-    def bind_processor(self, dialect):
-        def process(value):
-            if value is None:
-                return None
-            elif isinstance(value, basestring):
-                return value
-            elif dialect.datetimeformat == 'internal':
-                return value.strftime("%H%M%S")
-            elif dialect.datetimeformat == 'iso':
-                return value.strftime("%H-%M-%S")
-            else:
-                raise exc.InvalidRequestError(
-                    "datetimeformat '%s' is not supported." % (
-                    dialect.datetimeformat,))
-        return process
-
-    def result_processor(self, dialect, coltype):
-        if dialect.datetimeformat == 'internal':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.time(int(value[0:4]), int(value[4:6]),
-                                         int(value[6:8]))
-        elif dialect.datetimeformat == 'iso':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.time(int(value[0:4]), int(value[5:7]),
-                                         int(value[8:10]))
-        else:
-            raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." %
-                dialect.datetimeformat)
-        return process
-
-
-class MaxBlob(sqltypes.LargeBinary):
-    def bind_processor(self, dialect):
-        return processors.to_str
-
-    def result_processor(self, dialect, coltype):
-        def process(value):
-            if value is None:
-                return None
-            else:
-                return value.read(value.remainingLength())
-        return process
-
-class MaxDBTypeCompiler(compiler.GenericTypeCompiler):
-    def _string_spec(self, string_spec, type_):
-        if type_.length is None:
-            spec = 'LONG'
-        else:
-            spec = '%s(%s)' % (string_spec, type_.length)
-
-        if getattr(type_, 'encoding'):
-            spec = ' '.join([spec, getattr(type_, 'encoding').upper()])
-        return spec
-
-    def visit_text(self, type_):
-        spec = 'LONG'
-        if getattr(type_, 'encoding', None):
-            spec = ' '.join((spec, type_.encoding))
-        elif type_.convert_unicode:
-            spec = ' '.join((spec, 'UNICODE'))
-
-        return spec
-
-    def visit_char(self, type_):
-        return self._string_spec("CHAR", type_)
-
-    def visit_string(self, type_):
-        return self._string_spec("VARCHAR", type_)
-
-    def visit_large_binary(self, type_):
-        return "LONG BYTE"
-
-    def visit_numeric(self, type_):
-        if type_.scale and type_.precision:
-            return 'FIXED(%s, %s)' % (type_.precision, type_.scale)
-        elif type_.precision:
-            return 'FIXED(%s)' % type_.precision
-        else:
-            return 'INTEGER'
-
-    def visit_BOOLEAN(self, type_):
-        return "BOOLEAN"
-
-colspecs = {
-    sqltypes.Numeric: MaxNumeric,
-    sqltypes.DateTime: MaxTimestamp,
-    sqltypes.Date: MaxDate,
-    sqltypes.Time: MaxTime,
-    sqltypes.String: MaxString,
-    sqltypes.Unicode:MaxUnicode,
-    sqltypes.LargeBinary: MaxBlob,
-    sqltypes.Text: MaxText,
-    sqltypes.CHAR: MaxChar,
-    sqltypes.TIMESTAMP: MaxTimestamp,
-    sqltypes.BLOB: MaxBlob,
-    sqltypes.Unicode: MaxUnicode,
-    }
-
-ischema_names = {
-    'boolean': sqltypes.BOOLEAN,
-    'char': sqltypes.CHAR,
-    'character': sqltypes.CHAR,
-    'date': sqltypes.DATE,
-    'fixed': sqltypes.Numeric,
-    'float': sqltypes.FLOAT,
-    'int': sqltypes.INT,
-    'integer': sqltypes.INT,
-    'long binary': sqltypes.BLOB,
-    'long unicode': sqltypes.Text,
-    'long': sqltypes.Text,
-    'long': sqltypes.Text,
-    'smallint': sqltypes.SmallInteger,
-    'time': sqltypes.Time,
-    'timestamp': sqltypes.TIMESTAMP,
-    'varchar': sqltypes.VARCHAR,
-    }
-
-# TODO: migrate this to sapdb.py
-class MaxDBExecutionContext(default.DefaultExecutionContext):
-    def post_exec(self):
-        # DB-API bug: if there were any functions as values,
-        # then do another select and pull CURRVAL from the
-        # autoincrement column's implicit sequence... ugh
-        if self.compiled.isinsert and not self.executemany:
-            table = self.compiled.statement.table
-            index, serial_col = _autoserial_column(table)
-
-            if serial_col and (not self.compiled._safeserial or
-                               not(self._last_inserted_ids) or
-                               self._last_inserted_ids[index] in (None, 0)):
-                if table.schema:
-                    sql = "SELECT %s.CURRVAL FROM DUAL" % (
-                        self.compiled.preparer.format_table(table))
-                else:
-                    sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % (
-                        self.compiled.preparer.format_table(table))
-
-                rs = self.cursor.execute(sql)
-                id = rs.fetchone()[0]
-
-                if not self._last_inserted_ids:
-                    # This shouldn't ever be > 1?  Right?
-                    self._last_inserted_ids = \
-                      [None] * len(table.primary_key.columns)
-                self._last_inserted_ids[index] = id
-
-        super(MaxDBExecutionContext, self).post_exec()
-
-    def get_result_proxy(self):
-        if self.cursor.description is not None:
-            for column in self.cursor.description:
-                if column[1] in ('Long Binary', 'Long', 'Long Unicode'):
-                    return MaxDBResultProxy(self)
-        return _result.ResultProxy(self)
-
-    @property
-    def rowcount(self):
-        if hasattr(self, '_rowcount'):
-            return self._rowcount
-        else:
-            return self.cursor.rowcount
-
-    def fire_sequence(self, seq):
-        if seq.optional:
-            return None
-        return self._execute_scalar("SELECT %s.NEXTVAL FROM DUAL" % (
-            self.dialect.identifier_preparer.format_sequence(seq)))
-
-class MaxDBCachedColumnRow(_result.RowProxy):
-    """A RowProxy that only runs result_processors once per column."""
-
-    def __init__(self, parent, row):
-        super(MaxDBCachedColumnRow, self).__init__(parent, row)
-        self.columns = {}
-        self._row = row
-        self._parent = parent
-
-    def _get_col(self, key):
-        if key not in self.columns:
-            self.columns[key] = self._parent._get_col(self._row, key)
-        return self.columns[key]
-
-    def __iter__(self):
-        for i in xrange(len(self._row)):
-            yield self._get_col(i)
-
-    def __repr__(self):
-        return repr(list(self))
-
-    def __eq__(self, other):
-        return ((other is self) or
-                (other == tuple([self._get_col(key)
-                                 for key in xrange(len(self._row))])))
-    def __getitem__(self, key):
-        if isinstance(key, slice):
-            indices = key.indices(len(self._row))
-            return tuple([self._get_col(i) for i in xrange(*indices)])
-        else:
-            return self._get_col(key)
-
-    def __getattr__(self, name):
-        try:
-            return self._get_col(name)
-        except KeyError:
-            raise AttributeError(name)
-
-
-class MaxDBResultProxy(_result.ResultProxy):
-    _process_row = MaxDBCachedColumnRow
-
-class MaxDBCompiler(compiler.SQLCompiler):
-
-    function_conversion = {
-        'CURRENT_DATE': 'DATE',
-        'CURRENT_TIME': 'TIME',
-        'CURRENT_TIMESTAMP': 'TIMESTAMP',
-        }
-
-    # These functions must be written without parens when called with no
-    # parameters.  e.g. 'SELECT DATE FROM DUAL' not 'SELECT DATE() FROM DUAL'
-    bare_functions = set([
-        'CURRENT_SCHEMA', 'DATE', 'FALSE', 'SYSDBA', 'TIME', 'TIMESTAMP',
-        'TIMEZONE', 'TRANSACTION', 'TRUE', 'USER', 'UID', 'USERGROUP',
-        'UTCDATE', 'UTCDIFF'])
-
-    def visit_mod_binary(self, binary, operator, **kw):
-        return "mod(%s, %s)" % \
-                    (self.process(binary.left, **kw),
-                            self.process(binary.right, **kw))
-
-    def default_from(self):
-        return ' FROM DUAL'
-
-    def for_update_clause(self, select):
-        clause = select.for_update
-        if clause is True:
-            return " WITH LOCK EXCLUSIVE"
-        elif clause is None:
-            return ""
-        elif clause == "read":
-            return " WITH LOCK"
-        elif clause == "ignore":
-            return " WITH LOCK (IGNORE) EXCLUSIVE"
-        elif clause == "nowait":
-            return " WITH LOCK (NOWAIT) EXCLUSIVE"
-        elif isinstance(clause, basestring):
-            return " WITH LOCK %s" % clause.upper()
-        elif not clause:
-            return ""
-        else:
-            return " WITH LOCK EXCLUSIVE"
-
-    def function_argspec(self, fn, **kw):
-        if fn.name.upper() in self.bare_functions:
-            return ""
-        elif len(fn.clauses) > 0:
-            return compiler.SQLCompiler.function_argspec(self, fn, **kw)
-        else:
-            return ""
-
-    def visit_function(self, fn, **kw):
-        transform = self.function_conversion.get(fn.name.upper(), None)
-        if transform:
-            fn = fn._clone()
-            fn.name = transform
-        return super(MaxDBCompiler, self).visit_function(fn, **kw)
-
-    def visit_cast(self, cast, **kwargs):
-        # MaxDB only supports casts * to NUMERIC, * to VARCHAR or
-        # date/time to VARCHAR.  Casts of LONGs will fail.
-        if isinstance(cast.type, (sqltypes.Integer, sqltypes.Numeric)):
-            return "NUM(%s)" % self.process(cast.clause)
-        elif isinstance(cast.type, sqltypes.String):
-            return "CHR(%s)" % self.process(cast.clause)
-        else:
-            return self.process(cast.clause)
-
-    def visit_sequence(self, sequence):
-        if sequence.optional:
-            return None
-        else:
-            return (
-                self.dialect.identifier_preparer.format_sequence(sequence) +
-                ".NEXTVAL")
-
-    class ColumnSnagger(visitors.ClauseVisitor):
-        def __init__(self):
-            self.count = 0
-            self.column = None
-        def visit_column(self, column):
-            self.column = column
-            self.count += 1
-
-    def _find_labeled_columns(self, columns, use_labels=False):
-        labels = {}
-        for column in columns:
-            if isinstance(column, basestring):
-                continue
-            snagger = self.ColumnSnagger()
-            snagger.traverse(column)
-            if snagger.count == 1:
-                if isinstance(column, sql_expr.Label):
-                    labels[unicode(snagger.column)] = column.name
-                elif use_labels:
-                    labels[unicode(snagger.column)] = column._label
-
-        return labels
-
-    def order_by_clause(self, select, **kw):
-        order_by = self.process(select._order_by_clause, **kw)
-
-        # ORDER BY clauses in DISTINCT queries must reference aliased
-        # inner columns by alias name, not true column name.
-        if order_by and getattr(select, '_distinct', False):
-            labels = self._find_labeled_columns(select.inner_columns,
-                                                select.use_labels)
-            if labels:
-                for needs_alias in labels.keys():
-                    r = re.compile(r'(^| )(%s)(,| |$)' %
-                                   re.escape(needs_alias))
-                    order_by = r.sub((r'\1%s\3' % labels[needs_alias]),
-                                     order_by)
-
-        # No ORDER BY in subqueries.
-        if order_by:
-            if self.is_subquery():
-                # It's safe to simply drop the ORDER BY if there is no
-                # LIMIT.  Right?  Other dialects seem to get away with
-                # dropping order.
-                if select._limit:
-                    raise exc.CompileError(
-                        "MaxDB does not support ORDER BY in subqueries")
-                else:
-                    return ""
-            return " ORDER BY " + order_by
-        else:
-            return ""
-
-    def get_select_precolumns(self, select):
-        # Convert a subquery's LIMIT to TOP
-        sql = select._distinct and 'DISTINCT ' or ''
-        if self.is_subquery() and select._limit:
-            if select._offset:
-                raise exc.InvalidRequestError(
-                    'MaxDB does not support LIMIT with an offset.')
-            sql += 'TOP %s ' % select._limit
-        return sql
-
-    def limit_clause(self, select):
-        # The docs say offsets are supported with LIMIT.  But they're not.
-        # TODO: maybe emulate by adding a ROWNO/ROWNUM predicate?
-        # TODO: does MaxDB support bind params for LIMIT / TOP ?
-        if self.is_subquery():
-            # sub queries need TOP
-            return ''
-        elif select._offset:
-            raise exc.InvalidRequestError(
-                'MaxDB does not support LIMIT with an offset.')
-        else:
-            return ' \n LIMIT %s' % (select._limit,)
-
-    def visit_insert(self, insert):
-        self.isinsert = True
-        self._safeserial = True
-
-        colparams = self._get_colparams(insert)
-        for value in (insert.parameters or {}).itervalues():
-            if isinstance(value, sql_expr.Function):
-                self._safeserial = False
-                break
-
-        return ''.join(('INSERT INTO ',
-                         self.preparer.format_table(insert.table),
-                         ' (',
-                         ', '.join([self.preparer.format_column(c[0])
-                                    for c in colparams]),
-                         ') VALUES (',
-                         ', '.join([c[1] for c in colparams]),
-                         ')'))
-
-
-class MaxDBIdentifierPreparer(compiler.IdentifierPreparer):
-    reserved_words = set([
-        'abs', 'absolute', 'acos', 'adddate', 'addtime', 'all', 'alpha',
-        'alter', 'any', 'ascii', 'asin', 'atan', 'atan2', 'avg', 'binary',
-        'bit', 'boolean', 'byte', 'case', 'ceil', 'ceiling', 'char',
-        'character', 'check', 'chr', 'column', 'concat', 'constraint', 'cos',
-        'cosh', 'cot', 'count', 'cross', 'curdate', 'current', 'curtime',
-        'database', 'date', 'datediff', 'day', 'dayname', 'dayofmonth',
-        'dayofweek', 'dayofyear', 'dec', 'decimal', 'decode', 'default',
-        'degrees', 'delete', 'digits', 'distinct', 'double', 'except',
-        'exists', 'exp', 'expand', 'first', 'fixed', 'float', 'floor', 'for',
-        'from', 'full', 'get_objectname', 'get_schema', 'graphic', 'greatest',
-        'group', 'having', 'hex', 'hextoraw', 'hour', 'ifnull', 'ignore',
-        'index', 'initcap', 'inner', 'insert', 'int', 'integer', 'internal',
-        'intersect', 'into', 'join', 'key', 'last', 'lcase', 'least', 'left',
-        'length', 'lfill', 'list', 'ln', 'locate', 'log', 'log10', 'long',
-        'longfile', 'lower', 'lpad', 'ltrim', 'makedate', 'maketime',
-        'mapchar', 'max', 'mbcs', 'microsecond', 'min', 'minute', 'mod',
-        'month', 'monthname', 'natural', 'nchar', 'next', 'no', 'noround',
-        'not', 'now', 'null', 'num', 'numeric', 'object', 'of', 'on',
-        'order', 'packed', 'pi', 'power', 'prev', 'primary', 'radians',
-        'real', 'reject', 'relative', 'replace', 'rfill', 'right', 'round',
-        'rowid', 'rowno', 'rpad', 'rtrim', 'second', 'select', 'selupd',
-        'serial', 'set', 'show', 'sign', 'sin', 'sinh', 'smallint', 'some',
-        'soundex', 'space', 'sqrt', 'stamp', 'statistics', 'stddev',
-        'subdate', 'substr', 'substring', 'subtime', 'sum', 'sysdba',
-        'table', 'tan', 'tanh', 'time', 'timediff', 'timestamp', 'timezone',
-        'to', 'toidentifier', 'transaction', 'translate', 'trim', 'trunc',
-        'truncate', 'ucase', 'uid', 'unicode', 'union', 'update', 'upper',
-        'user', 'usergroup', 'using', 'utcdate', 'utcdiff', 'value', 'values',
-        'varchar', 'vargraphic', 'variance', 'week', 'weekofyear', 'when',
-        'where', 'with', 'year', 'zoned' ])
-
-    def _normalize_name(self, name):
-        if name is None:
-            return None
-        if name.isupper():
-            lc_name = name.lower()
-            if not self._requires_quotes(lc_name):
-                return lc_name
-        return name
-
-    def _denormalize_name(self, name):
-        if name is None:
-            return None
-        elif (name.islower() and
-              not self._requires_quotes(name)):
-            return name.upper()
-        else:
-            return name
-
-    def _maybe_quote_identifier(self, name):
-        if self._requires_quotes(name):
-            return self.quote_identifier(name)
-        else:
-            return name
-
-
-class MaxDBDDLCompiler(compiler.DDLCompiler):
-    def get_column_specification(self, column, **kw):
-        colspec = [self.preparer.format_column(column),
-                   self.dialect.type_compiler.process(column.type)]
-
-        if not column.nullable:
-            colspec.append('NOT NULL')
-
-        default = column.default
-        default_str = self.get_column_default_string(column)
-
-        # No DDL default for columns specified with non-optional sequence-
-        # this defaulting behavior is entirely client-side. (And as a
-        # consequence, non-reflectable.)
-        if (default and isinstance(default, schema.Sequence) and
-            not default.optional):
-            pass
-        # Regular default
-        elif default_str is not None:
-            colspec.append('DEFAULT %s' % default_str)
-        # Assign DEFAULT SERIAL heuristically
-        elif column.primary_key and column.autoincrement:
-            # For SERIAL on a non-primary key member, use
-            # DefaultClause(text('SERIAL'))
-            try:
-                first = [c for c in column.table.primary_key.columns
-                         if (c.autoincrement and
-                             (isinstance(c.type, sqltypes.Integer) or
-                              (isinstance(c.type, MaxNumeric) and
-                               c.type.precision)) and
-                             not c.foreign_keys)].pop(0)
-                if column is first:
-                    colspec.append('DEFAULT SERIAL')
-            except IndexError:
-                pass
-        return ' '.join(colspec)
-
-    def get_column_default_string(self, column):
-        if isinstance(column.server_default, schema.DefaultClause):
-            if isinstance(column.default.arg, basestring):
-                if isinstance(column.type, sqltypes.Integer):
-                    return str(column.default.arg)
-                else:
-                    return "'%s'" % column.default.arg
-            else:
-                return unicode(self._compile(column.default.arg, None))
-        else:
-            return None
-
-    def visit_create_sequence(self, create):
-        """Creates a SEQUENCE.
-
-        TODO: move to module doc?
-
-        start
-          With an integer value, set the START WITH option.
-
-        increment
-          An integer value to increment by.  Default is the database default.
-
-        maxdb_minvalue
-        maxdb_maxvalue
-          With an integer value, sets the corresponding sequence option.
-
-        maxdb_no_minvalue
-        maxdb_no_maxvalue
-          Defaults to False.  If true, sets the corresponding sequence option.
-
-        maxdb_cycle
-          Defaults to False.  If true, sets the CYCLE option.
-
-        maxdb_cache
-          With an integer value, sets the CACHE option.
-
-        maxdb_no_cache
-          Defaults to False.  If true, sets NOCACHE.
-        """
-        sequence = create.element
-
-        if (not sequence.optional and
-            (not self.checkfirst or
-             not self.dialect.has_sequence(self.connection, sequence.name))):
-
-            ddl = ['CREATE SEQUENCE',
-                   self.preparer.format_sequence(sequence)]
-
-            sequence.increment = 1
-
-            if sequence.increment is not None:
-                ddl.extend(('INCREMENT BY', str(sequence.increment)))
-
-            if sequence.start is not None:
-                ddl.extend(('START WITH', str(sequence.start)))
-
-            opts = dict([(pair[0][6:].lower(), pair[1])
-                         for pair in sequence.kwargs.items()
-                         if pair[0].startswith('maxdb_')])
-
-            if 'maxvalue' in opts:
-                ddl.extend(('MAXVALUE', str(opts['maxvalue'])))
-            elif opts.get('no_maxvalue', False):
-                ddl.append('NOMAXVALUE')
-            if 'minvalue' in opts:
-                ddl.extend(('MINVALUE', str(opts['minvalue'])))
-            elif opts.get('no_minvalue', False):
-                ddl.append('NOMINVALUE')
-
-            if opts.get('cycle', False):
-                ddl.append('CYCLE')
-
-            if 'cache' in opts:
-                ddl.extend(('CACHE', str(opts['cache'])))
-            elif opts.get('no_cache', False):
-                ddl.append('NOCACHE')
-
-            return ' '.join(ddl)
-
-
-class MaxDBDialect(default.DefaultDialect):
-    name = 'maxdb'
-    supports_alter = True
-    supports_unicode_statements = True
-    max_identifier_length = 32
-    supports_sane_rowcount = True
-    supports_sane_multi_rowcount = False
-
-    preparer = MaxDBIdentifierPreparer
-    statement_compiler = MaxDBCompiler
-    ddl_compiler = MaxDBDDLCompiler
-    execution_ctx_cls = MaxDBExecutionContext
-
-    ported_sqla_06 = False
-
-    colspecs = colspecs
-    ischema_names = ischema_names
-
-    # MaxDB-specific
-    datetimeformat = 'internal'
-
-    def __init__(self, _raise_known_sql_errors=False, **kw):
-        super(MaxDBDialect, self).__init__(**kw)
-        self._raise_known = _raise_known_sql_errors
-
-        if self.dbapi is None:
-            self.dbapi_type_map = {}
-        else:
-            self.dbapi_type_map = {
-                'Long Binary': MaxBlob(),
-                'Long byte_t': MaxBlob(),
-                'Long Unicode': MaxText(),
-                'Timestamp': MaxTimestamp(),
-                'Date': MaxDate(),
-                'Time': MaxTime(),
-                datetime.datetime: MaxTimestamp(),
-                datetime.date: MaxDate(),
-                datetime.time: MaxTime(),
-            }
-
-    def do_execute(self, cursor, statement, parameters, context=None):
-        res = cursor.execute(statement, parameters)
-        if isinstance(res, int) and context is not None:
-            context._rowcount = res
-
-    def do_release_savepoint(self, connection, name):
-        # Does MaxDB truly support RELEASE SAVEPOINT <id>?  All my attempts
-        # produce "SUBTRANS COMMIT/ROLLBACK not allowed without SUBTRANS
-        # BEGIN SQLSTATE: I7065"
-        # Note that ROLLBACK TO works fine.  In theory, a RELEASE should
-        # just free up some transactional resources early, before the overall
-        # COMMIT/ROLLBACK so omitting it should be relatively ok.
-        pass
-
-    def _get_default_schema_name(self, connection):
-        return self.identifier_preparer._normalize_name(
-                connection.execute(
-                        'SELECT CURRENT_SCHEMA FROM DUAL').scalar())
-
-    def has_table(self, connection, table_name, schema=None):
-        denormalize = self.identifier_preparer._denormalize_name
-        bind = [denormalize(table_name)]
-        if schema is None:
-            sql = ("SELECT tablename FROM TABLES "
-                   "WHERE TABLES.TABLENAME=? AND"
-                   "  TABLES.SCHEMANAME=CURRENT_SCHEMA ")
-        else:
-            sql = ("SELECT tablename FROM TABLES "
-                   "WHERE TABLES.TABLENAME = ? AND"
-                   "  TABLES.SCHEMANAME=? ")
-            bind.append(denormalize(schema))
-
-        rp = connection.execute(sql, bind)
-        return bool(rp.first())
-
-    @reflection.cache
-    def get_table_names(self, connection, schema=None, **kw):
-        if schema is None:
-            sql = (" SELECT TABLENAME FROM TABLES WHERE "
-                   " SCHEMANAME=CURRENT_SCHEMA ")
-            rs = connection.execute(sql)
-        else:
-            sql = (" SELECT TABLENAME FROM TABLES WHERE "
-                   " SCHEMANAME=? ")
-            matchname = self.identifier_preparer._denormalize_name(schema)
-            rs = connection.execute(sql, matchname)
-        normalize = self.identifier_preparer._normalize_name
-        return [normalize(row[0]) for row in rs]
-
-    def reflecttable(self, connection, table, include_columns):
-        denormalize = self.identifier_preparer._denormalize_name
-        normalize = self.identifier_preparer._normalize_name
-
-        st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
-              '  NULLABLE, "DEFAULT", DEFAULTFUNCTION '
-              'FROM COLUMNS '
-              'WHERE TABLENAME=? AND SCHEMANAME=%s '
-              'ORDER BY POS')
-
-        fk = ('SELECT COLUMNNAME, FKEYNAME, '
-              '  REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
-              '  (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
-              '   THEN 1 ELSE 0 END) AS in_schema '
-              'FROM FOREIGNKEYCOLUMNS '
-              'WHERE TABLENAME=? AND SCHEMANAME=%s '
-              'ORDER BY FKEYNAME ')
-
-        params = [denormalize(table.name)]
-        if not table.schema:
-            st = st % 'CURRENT_SCHEMA'
-            fk = fk % 'CURRENT_SCHEMA'
-        else:
-            st = st % '?'
-            fk = fk % '?'
-            params.append(denormalize(table.schema))
-
-        rows = connection.execute(st, params).fetchall()
-        if not rows:
-            raise exc.NoSuchTableError(table.fullname)
-
-        include_columns = set(include_columns or [])
-
-        for row in rows:
-            (name, mode, col_type, encoding, length, scale,
-             nullable, constant_def, func_def) = row
-
-            name = normalize(name)
-
-            if include_columns and name not in include_columns:
-                continue
-
-            type_args, type_kw = [], {}
-            if col_type == 'FIXED':
-                type_args = length, scale
-                # Convert FIXED(10) DEFAULT SERIAL to our Integer
-                if (scale == 0 and
-                    func_def is not None and func_def.startswith('SERIAL')):
-                    col_type = 'INTEGER'
-                    type_args = length,
-            elif col_type in 'FLOAT':
-                type_args = length,
-            elif col_type in ('CHAR', 'VARCHAR'):
-                type_args = length,
-                type_kw['encoding'] = encoding
-            elif col_type == 'LONG':
-                type_kw['encoding'] = encoding
-
-            try:
-                type_cls = ischema_names[col_type.lower()]
-                type_instance = type_cls(*type_args, **type_kw)
-            except KeyError:
-                util.warn("Did not recognize type '%s' of column '%s'" %
-                          (col_type, name))
-                type_instance = sqltypes.NullType
-
-            col_kw = {'autoincrement': False}
-            col_kw['nullable'] = (nullable == 'YES')
-            col_kw['primary_key'] = (mode == 'KEY')
-
-            if func_def is not None:
-                if func_def.startswith('SERIAL'):
-                    if col_kw['primary_key']:
-                        # No special default- let the standard autoincrement
-                        # support handle SERIAL pk columns.
-                        col_kw['autoincrement'] = True
-                    else:
-                        # strip current numbering
-                        col_kw['server_default'] = schema.DefaultClause(
-                            sql.text('SERIAL'))
-                        col_kw['autoincrement'] = True
-                else:
-                    col_kw['server_default'] = schema.DefaultClause(
-                        sql.text(func_def))
-            elif constant_def is not None:
-                col_kw['server_default'] = schema.DefaultClause(sql.text(
-                    "'%s'" % constant_def.replace("'", "''")))
-
-            table.append_column(schema.Column(name, type_instance, **col_kw))
-
-        fk_sets = itertools.groupby(connection.execute(fk, params),
-                                    lambda row: row.FKEYNAME)
-        for fkeyname, fkey in fk_sets:
-            fkey = list(fkey)
-            if include_columns:
-                key_cols = set([r.COLUMNNAME for r in fkey])
-                if key_cols != include_columns:
-                    continue
-
-            columns, referants = [], []
-            quote = self.identifier_preparer._maybe_quote_identifier
-
-            for row in fkey:
-                columns.append(normalize(row.COLUMNNAME))
-                if table.schema or not row.in_schema:
-                    referants.append('.'.join(
-                        [quote(normalize(row[c]))
-                         for c in ('REFSCHEMANAME', 'REFTABLENAME',
-                                   'REFCOLUMNNAME')]))
-                else:
-                    referants.append('.'.join(
-                        [quote(normalize(row[c]))
-                         for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))
-
-            constraint_kw = {'name': fkeyname.lower()}
-            if fkey[0].RULE is not None:
-                rule = fkey[0].RULE
-                if rule.startswith('DELETE '):
-                    rule = rule[7:]
-                constraint_kw['ondelete'] = rule
-
-            table_kw = {}
-            if table.schema or not row.in_schema:
-                table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)
-
-            ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
-                                            table_kw.get('schema'))
-            if ref_key not in table.metadata.tables:
-                schema.Table(normalize(fkey[0].REFTABLENAME),
-                             table.metadata,
-                             autoload=True, autoload_with=connection,
-                             **table_kw)
-
-            constraint = schema.ForeignKeyConstraint(
-                            columns, referants, link_to_name=True,
-                            **constraint_kw)
-            table.append_constraint(constraint)
-
-    def has_sequence(self, connection, name):
-        # [ticket:726] makes this schema-aware.
-        denormalize = self.identifier_preparer._denormalize_name
-        sql = ("SELECT sequence_name FROM SEQUENCES "
-               "WHERE SEQUENCE_NAME=? ")
-
-        rp = connection.execute(sql, denormalize(name))
-        return bool(rp.first())
-
-
-def _autoserial_column(table):
-    """Finds the effective DEFAULT SERIAL column of a Table, if any."""
-
-    for index, col in enumerate(table.primary_key.columns):
-        if (isinstance(col.type, (sqltypes.Integer, sqltypes.Numeric)) and
-            col.autoincrement):
-            if isinstance(col.default, schema.Sequence):
-                if col.default.optional:
-                    return index, col
-            elif (col.default is None or
-                  (not isinstance(col.server_default, schema.DefaultClause))):
-                return index, col
-
-    return None, None
-
diff --git a/lib/sqlalchemy/dialects/maxdb/sapdb.py b/lib/sqlalchemy/dialects/maxdb/sapdb.py
deleted file mode 100644 (file)
index 280411b..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-# maxdb/sapdb.py
-# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy.dialects.maxdb.base import MaxDBDialect
-
-class MaxDBDialect_sapdb(MaxDBDialect):
-    driver = 'sapdb'
-
-    @classmethod
-    def dbapi(cls):
-        from sapdb import dbapi as _dbapi
-        return _dbapi
-
-    def create_connect_args(self, url):
-        opts = url.translate_connect_args(username='user')
-        opts.update(url.query)
-        return [], opts
-
-
-dialect = MaxDBDialect_sapdb
\ No newline at end of file
index 6dd25350fac07a813021ce2bb4d21a234455d99e..e8f0385b4abe082cd1da93450206fb826080a8ee 100644 (file)
@@ -6,10 +6,6 @@
 
 """Support for the Microsoft SQL Server database.
 
-Connecting
-----------
-
-See the individual driver sections below for details on connecting.
 
 Auto Increment Behavior
 -----------------------
index acb5671ca03714b7ce95c7efb8b1ac59be1fd33b..7f67d78261cf26945ac2f4a2584e5c6561cf3348 100644 (file)
@@ -6,6 +6,26 @@
 
 """Support for the MySQL database.
 
+DBAPI Support
+-------------
+
+The following dialect/driver options are available:
+
+* :ref:`mysqldb`
+
+* :ref:`mysqlconnector`
+
+* :ref:`oursql`
+
+* :ref:`gaerdbms`
+
+* :ref:`pymysql`
+
+* :ref:`mysql_pyodbc`
+
+* :ref:`mysql_zxjdbc`
+
+
 Supported Versions and Features
 -------------------------------
 
index a56d4b7917686ce7cf69f36c86abddbf3144d6c3..29a67a540be78624c0e8de034d56be62bbd2d266 100644 (file)
@@ -10,6 +10,11 @@ changes.
 
 .. versionadded:: 0.7.8
 
+DBAPI
+-----
+
+    https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide
+
 Connecting
 ----------
 
index 104d5330a2037b81fa5229a5f3a66e77adbf4b96..ea8d9322c08a764b87ce25d7d2d7c1719640ec92 100644 (file)
@@ -6,6 +6,9 @@
 
 """Support for the MySQL database via the MySQL Connector/Python adapter.
 
+DBAPI
+-----
+
 MySQL Connector/Python is available at:
 
     https://launchpad.net/myconnpy
index 21b87a0187216651d1f223dd69229bad5dfa9a9d..add0c00ee8f3435183ef0bc6f9ae1f4b37f4e724 100644 (file)
@@ -6,12 +6,13 @@
 
 """Support for the MySQL database via the MySQL-python adapter.
 
+DBAPI
+-----
+
 MySQL-Python is available at:
 
     http://sourceforge.net/projects/mysql-python
 
-At least version 1.2.1 or 1.2.2 should be used.
-
 Connecting
 -----------
 
index 1be3f3b2a94594d565526ebeb0b6bd305b067ec9..ca713a068fdf3d3b8efdacc75fbbed78d6a4de0f 100644 (file)
@@ -6,6 +6,9 @@
 
 """Support for the MySQL database via the oursql adapter.
 
+DBAPI
+-----
+
 OurSQL is available at:
 
     http://packages.python.org/oursql/
index 71548880c6433aad8380b1ce9aa6fe3b6ad93abd..b6f2d4384f19871d92d2ce664bf32447919efb54 100644 (file)
@@ -6,6 +6,9 @@
 
 """Support for the MySQL database via the pymysql adapter.
 
+DBAPI
+-----
+
 pymysql is available at:
 
     http://code.google.com/p/pymysql/
index 03906f8c9b2f8d1938a8eb94f6c90bea4ed2a25e..20cc53be7f1cf4e7c5d63eb7123b504330e8e5b4 100644 (file)
@@ -6,6 +6,9 @@
 
 """Support for the MySQL database via the pyodbc adapter.
 
+DBAPI
+-----
+
 pyodbc is available at:
 
     http://pypi.python.org/pypi/pyodbc/
index d25444cd0fbb2adbd945a64f4aa7a225e787191a..82e7dca8d013f4ed8e490dc7149c2d8a9329bf53 100644 (file)
@@ -6,8 +6,8 @@
 
 """Support for the MySQL database via Jython's zxjdbc JDBC connector.
 
-JDBC Driver
------------
+DBAPI
+-----
 
 The official MySQL JDBC driver is at
 http://dev.mysql.com/downloads/connector/j/.
@@ -15,7 +15,7 @@ http://dev.mysql.com/downloads/connector/j/.
 Connecting
 ----------
 
-Connect string format:
+Connect string::
 
     mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
 
index c7cc6a7f932c93241e9186346e612c62ffe5f4b6..ee4a82115c7d32709c6c882f6085dcceb0f26af6 100644 (file)
@@ -8,9 +8,6 @@
 
 Oracle version 8 through current (11g at the time of this writing) are supported.
 
-For information on connecting via specific drivers, see the documentation
-for that driver.
-
 Connect Arguments
 -----------------
 
index d23920c5e0bd9ac8f76563b7324b3d614a2cd14f..0d2cb3c8ff68e845a4286eb9874cbd24df3d8541 100644 (file)
@@ -6,8 +6,18 @@
 
 """Support for the PostgreSQL database.
 
-For information on connecting using specific drivers, see the documentation
-section regarding that driver.
+DBAPI Support
+-------------
+
+The following dialect/driver options are available:
+
+* :ref:`psycopg2`
+
+* :ref:`pg8000`
+
+* :ref:`pypostgresql`
+
+* :ref:`zxjdbc`
 
 Sequences/SERIAL
 ----------------
index d7f74bb986d327c255e04322e8e5bea9f7e2ef3e..e19d84b51ade8e4bb89f40c12aa8271aaa909d6a 100644 (file)
@@ -6,11 +6,17 @@
 
 """Support for the PostgreSQL database via the pg8000 driver.
 
+DBAPI
+------
+
+    http://pybrary.net/pg8000/
+
 Connecting
 ----------
 
-URLs are of the form
-``postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]``.
+Connect string format::
+
+    postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
 
 Unicode
 -------
index c5eb9b445b8060cf902030b0f39ffedf2efdf48f..14fb35456873906ad7f67303cb570276a7b02aea 100644 (file)
@@ -6,7 +6,7 @@
 
 """Support for the PostgreSQL database via the psycopg2 driver.
 
-Driver
+DBAPI
 ------
 
 The psycopg2 driver is available at http://pypi.python.org/pypi/psycopg2/ .
@@ -18,8 +18,12 @@ Note that psycopg1 is **not** supported.
 Connecting
 ----------
 
-URLs are of the form
-``postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]``.
+Connect string format::
+
+    postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
+
+psycopg2 Connect Arguments
+--------------------------
 
 psycopg2-specific keyword arguments which are accepted by
 :func:`.create_engine()` are:
index a1ba9a3bd5874d22a76d0257480131ef6de3ab05..cf091b3115648fc38735fbbc429d3236b99e6b42 100644 (file)
@@ -6,10 +6,17 @@
 
 """Support for the PostgreSQL database via py-postgresql.
 
+DBAPI
+-----
+
+    http://python.projects.pgfoundry.org/
+
 Connecting
 ----------
 
-URLs are of the form ``postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]``.
+Connect string format::
+
+    postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
 
 
 """
index 8382da831bf095dff998c99830757fb62f302d50..381424c7f04a0e9c759f0233e53152ca1e2f4896 100644 (file)
@@ -6,11 +6,19 @@
 
 """Support for the PostgreSQL database via the zxjdbc JDBC connector.
 
-JDBC Driver
+DBAPI
 -----------
 
 The official Postgresql JDBC driver is at http://jdbc.postgresql.org/.
 
+Connecting
+----------
+
+Connect string format::
+
+    postgresql+zxjdbc://scott:tiger@localhost/db
+
+
 """
 from ...connectors.zxJDBC import ZxJDBCConnector
 from .base import PGDialect, PGExecutionContext
index eb85cc256068a7e925e1687b8e44eb7362b8b10a..eb03a55c40b4f79d93463d24d15a0cd8988e62ac 100644 (file)
@@ -6,9 +6,6 @@
 
 """Support for the SQLite database.
 
-For information on connecting using a specific driver, see the documentation
-section regarding that driver.
-
 Date and Time Types
 -------------------
 
diff --git a/test/dialect/test_maxdb.py b/test/dialect/test_maxdb.py
deleted file mode 100644 (file)
index 237d6c9..0000000
+++ /dev/null
@@ -1,239 +0,0 @@
-"""MaxDB-specific tests."""
-
-from sqlalchemy.testing import eq_
-import StringIO, sys
-from sqlalchemy import *
-from sqlalchemy import exc, sql
-from sqlalchemy.util.compat import decimal
-from sqlalchemy.databases import maxdb
-from sqlalchemy.testing import *
-
-
-# TODO
-# - add "Database" test, a quick check for join behavior on different
-# max versions
-# - full max-specific reflection suite
-# - datetime tests
-# - the orm/query 'test_has' destabilizes the server- cover here
-
-class ReflectionTest(fixtures.TestBase, AssertsExecutionResults):
-    """Extra reflection tests."""
-
-    __only_on__ = 'maxdb'
-
-    def _test_decimal(self, tabledef):
-        """Checks a variety of FIXED usages.
-
-        This is primarily for SERIAL columns, which can be FIXED (scale-less)
-        or (SMALL)INT.  Ensures that FIXED id columns are converted to
-        integers and that are assignable as such.  Also exercises general
-        decimal assignment and selection behavior.
-        """
-
-        meta = MetaData(testing.db)
-        try:
-            if isinstance(tabledef, basestring):
-                # run textual CREATE TABLE
-                testing.db.execute(tabledef)
-            else:
-                _t = tabledef.tometadata(meta)
-                _t.create()
-            t = Table('dectest', meta, autoload=True)
-
-            vals = [decimal.Decimal('2.2'), decimal.Decimal('23'), decimal.Decimal('2.4'), 25]
-            cols = ['d1','d2','n1','i1']
-            t.insert().execute(dict(zip(cols,vals)))
-            roundtrip = list(t.select().execute())
-            eq_(roundtrip, [tuple([1] + vals)])
-
-            t.insert().execute(dict(zip(['id'] + cols,
-                                        [2] + list(roundtrip[0][1:]))))
-            roundtrip2 = list(t.select(order_by=t.c.id).execute())
-            eq_(roundtrip2, [tuple([1] + vals),
-                                           tuple([2] + vals)])
-        finally:
-            try:
-                testing.db.execute("DROP TABLE dectest")
-            except exc.DatabaseError:
-                pass
-
-    def test_decimal_fixed_serial(self):
-        tabledef = """
-        CREATE TABLE dectest (
-          id FIXED(10) DEFAULT SERIAL PRIMARY KEY,
-          d1 FIXED(10,2),
-          d2 FIXED(12),
-          n1 NUMERIC(12,2),
-          i1 INTEGER)
-          """
-        return self._test_decimal(tabledef)
-
-    def test_decimal_integer_serial(self):
-        tabledef = """
-        CREATE TABLE dectest (
-          id INTEGER DEFAULT SERIAL PRIMARY KEY,
-          d1 DECIMAL(10,2),
-          d2 DECIMAL(12),
-          n1 NUMERIC(12,2),
-          i1 INTEGER)
-          """
-        return self._test_decimal(tabledef)
-
-    def test_decimal_implicit_serial(self):
-        tabledef = """
-        CREATE TABLE dectest (
-          id SERIAL PRIMARY KEY,
-          d1 FIXED(10,2),
-          d2 FIXED(12),
-          n1 NUMERIC(12,2),
-          i1 INTEGER)
-          """
-        return self._test_decimal(tabledef)
-
-    def test_decimal_smallint_serial(self):
-        tabledef = """
-        CREATE TABLE dectest (
-          id SMALLINT DEFAULT SERIAL PRIMARY KEY,
-          d1 FIXED(10,2),
-          d2 FIXED(12),
-          n1 NUMERIC(12,2),
-          i1 INTEGER)
-          """
-        return self._test_decimal(tabledef)
-
-    def test_decimal_sa_types_1(self):
-        tabledef = Table('dectest', MetaData(),
-                         Column('id', Integer, primary_key=True),
-                         Column('d1', DECIMAL(10, 2)),
-                         Column('d2', DECIMAL(12)),
-                         Column('n1', NUMERIC(12,2)),
-                         Column('i1', Integer))
-        return self._test_decimal(tabledef)
-
-    def test_decimal_sa_types_2(self):
-        tabledef = Table('dectest', MetaData(),
-                         Column('id', Integer, primary_key=True),
-                         Column('d1', maxdb.MaxNumeric(10, 2)),
-                         Column('d2', maxdb.MaxNumeric(12)),
-                         Column('n1', maxdb.MaxNumeric(12,2)),
-                         Column('i1', Integer))
-        return self._test_decimal(tabledef)
-
-    def test_decimal_sa_types_3(self):
-        tabledef = Table('dectest', MetaData(),
-                         Column('id', Integer, primary_key=True),
-                         Column('d1', maxdb.MaxNumeric(10, 2)),
-                         Column('d2', maxdb.MaxNumeric),
-                         Column('n1', maxdb.MaxNumeric(12,2)),
-                         Column('i1', Integer))
-        return self._test_decimal(tabledef)
-
-    def test_assorted_type_aliases(self):
-        """Ensures that aliased types are reflected properly."""
-
-        meta = MetaData(testing.db)
-        try:
-            testing.db.execute("""
-            CREATE TABLE assorted (
-              c1 INT,
-              c2 BINARY(2),
-              c3 DEC(4,2),
-              c4 DEC(4),
-              c5 DEC,
-              c6 DOUBLE PRECISION,
-              c7 NUMERIC(4,2),
-              c8 NUMERIC(4),
-              c9 NUMERIC,
-              c10 REAL(4),
-              c11 REAL,
-              c12 CHARACTER(2))
-              """)
-            table = Table('assorted', meta, autoload=True)
-            expected = [maxdb.MaxInteger,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxFloat,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxNumeric,
-                        maxdb.MaxFloat,
-                        maxdb.MaxFloat,
-                        maxdb.MaxChar,]
-            for i, col in enumerate(table.columns):
-                self.assert_(isinstance(col.type, expected[i]))
-        finally:
-            try:
-                testing.db.execute("DROP TABLE assorted")
-            except exc.DatabaseError:
-                pass
-
-class DBAPITest(fixtures.TestBase, AssertsExecutionResults):
-    """Asserts quirks in the native Python DB-API driver.
-
-    If any of these fail, that's good- the bug is fixed!
-    """
-
-    __only_on__ = 'maxdb'
-
-    def test_dbapi_breaks_sequences(self):
-        con = testing.db.connect().connection
-
-        cr = con.cursor()
-        cr.execute('CREATE SEQUENCE busto START WITH 1 INCREMENT BY 1')
-        try:
-            vals = []
-            for i in xrange(3):
-                cr.execute('SELECT busto.NEXTVAL FROM DUAL')
-                vals.append(cr.first()[0])
-
-            # should be 1,2,3, but no...
-            self.assert_(vals != [1,2,3])
-            # ...we get:
-            self.assert_(vals == [2,4,6])
-        finally:
-            cr.execute('DROP SEQUENCE busto')
-
-    def test_dbapi_breaks_mod_binds(self):
-        con = testing.db.connect().connection
-
-        cr = con.cursor()
-        # OK
-        cr.execute('SELECT MOD(3, 2) FROM DUAL')
-
-        # Broken!
-        try:
-            cr.execute('SELECT MOD(3, ?) FROM DUAL', [2])
-            self.assert_(False)
-        except:
-            self.assert_(True)
-
-        # OK
-        cr.execute('SELECT MOD(?, 2) FROM DUAL', [3])
-
-    def test_dbapi_breaks_close(self):
-        dialect = testing.db.dialect
-        cargs, ckw = dialect.create_connect_args(testing.db.url)
-
-        # There doesn't seem to be a way to test for this as it occurs in
-        # regular usage- the warning doesn't seem to go through 'warnings'.
-        con = dialect.dbapi.connect(*cargs, **ckw)
-        con.close()
-        del con  # <-- exception during __del__
-
-        # But this does the same thing.
-        con = dialect.dbapi.connect(*cargs, **ckw)
-        self.assert_(con.close == con.__del__)
-        con.close()
-        try:
-            con.close()
-            self.assert_(False)
-        except dialect.dbapi.DatabaseError:
-            self.assert_(True)
-
-    def test_modulo_operator(self):
-        st = str(select([sql.column('col') % 5]).compile(testing.db))
-        eq_(st, 'SELECT mod(col, ?) FROM DUAL')
-
-