Firebird backend
================
-This module implements the Firebird backend, thru the kinterbasdb_
+This module implements the Firebird backend.
+
+Connectivity is usually supplied via the kinterbasdb_
DBAPI module.
Firebird dialects
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
-
-From the user point of view, the biggest change is in date/time
-handling: under dialect 1, there's a single kind of field, ``DATE``
-with a synonim ``DATETIME``, that holds a `timestamp` value, that is a
-date with hour, minute, second. Under dialect 3 there are three kinds,
-a ``DATE`` that holds a date, a ``TIME`` that holds a *time of the
-day* value and a ``TIMESTAMP``, equivalent to the old ``DATE``.
-
-The problem is that the dialect of a Firebird database is a property
-of the database itself [#]_ (that is, any single database has been
-created with one dialect or the other: there is no way to change the
-after creation). SQLAlchemy has a single instance of the class that
-controls all the connections to a particular kind of database, so it
-cannot easily differentiate between the two modes, and in particular
-it **cannot** simultaneously talk with two distinct Firebird databases
-with different dialects.
-
-By default this module is biased toward dialect 3, but you can easily
-tweak it to handle dialect 1 if needed::
-
- from sqlalchemy import types as sqltypes
- from sqlalchemy.databases.firebird import FBDate, colspecs, ischema_names
-
- # Adjust the mapping of the timestamp kind
- ischema_names['TIMESTAMP'] = FBDate
- colspecs[sqltypes.DateTime] = FBDate,
-
-Other aspects may be version-specific. You can use the ``server_version_info()`` method
-on the ``FBDialect`` class to do whatever is needed::
-
- from sqlalchemy.databases.firebird import FBCompiler
-
- if engine.dialect.server_version_info(connection) < (2,0):
- # Change the name of the function ``length`` to use the UDF version
- # instead of ``char_length``
- FBCompiler.LENGTH_FUNCTION_NAME = 'strlen'
-
-Pooling connections
--------------------
-
-The default strategy used by SQLAlchemy to pool the database connections
-in particular cases may raise an ``OperationalError`` with a message
-`"object XYZ is in use"`. This happens on Firebird when there are two
-connections to the database, one is using, or has used, a particular table
-and the other tries to drop or alter the same table. To garantee DDL
-operations success Firebird recommend doing them as the single connected user.
-
-In case your SA application effectively needs to do DDL operations while other
-connections are active, the following setting may alleviate the problem::
-
- from sqlalchemy import pool
- from sqlalchemy.databases.firebird import dialect
-
- # Force SA to use a single connection per thread
- dialect.poolclass = pool.SingletonThreadPool
+
+The SQLAlchemy Firebird dialect detects these versions and
+adjusts its representation of SQL accordingly. However,
+support for dialect 1 is not well tested and probably has
+incompatibilities.
+
+Firebird Locking Behavior
+-------------------------
+
+Firebird locks tables aggressively. For this reason, a DROP TABLE
+may hang until other transactions are released. SQLAlchemy
+does its best to release transactions as quickly as possible. The
+most common cause of hanging transactions is a non-fully consumed
+result set, i.e.::
+
+ result = engine.execute("select * from table")
+ row = result.fetchone()
+ return
+
+Where above, the ``ResultProxy`` has not been fully consumed. The
+connection will be returned to the pool and the transactional state
+rolled back once the Python garbage collector reclaims the
+objects which hold onto the connection, which often occurs asynchronously.
+The above use case can be alleviated by calling ``first()`` on the
+``ResultProxy`` which will fetch the first row and immediately close
+all remaining cursor/connection resources.
RETURNING support
-----------------
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
.. _kinterbasdb: http://sourceforge.net/projects/kinterbasdb
+
"""
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
+from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
+from sqlalchemy.sql import compiler
+from sqlalchemy.types import (BIGINT, BLOB, BOOLEAN, CHAR, DATE,
+ FLOAT, INTEGER, NUMERIC, SMALLINT,
+ TEXT, TIME, TIMESTAMP, VARCHAR)
-_initialized_kb = False
+# Firebird/Interbase reserved words.  FBIdentifierPreparer consults this set
+# to decide which identifiers must be quoted in emitted SQL.
+# (Fixed: "release" was previously listed twice; harmless in a set, but the
+# duplicate literal has been removed.)
+RESERVED_WORDS = set(
+    ["action", "active", "add", "admin", "after", "all", "alter", "and", "any",
+     "as", "asc", "ascending", "at", "auto", "autoddl", "avg", "based", "basename",
+     "base_name", "before", "begin", "between", "bigint", "blob", "blobedit", "buffer",
+     "by", "cache", "cascade", "case", "cast", "char", "character", "character_length",
+     "char_length", "check", "check_point_len", "check_point_length", "close", "collate",
+     "collation", "column", "commit", "committed", "compiletime", "computed", "conditional",
+     "connect", "constraint", "containing", "continue", "count", "create", "cstring",
+     "current", "current_connection", "current_date", "current_role", "current_time",
+     "current_timestamp", "current_transaction", "current_user", "cursor", "database",
+     "date", "day", "db_key", "debug", "dec", "decimal", "declare", "default", "delete",
+     "desc", "descending", "describe", "descriptor", "disconnect", "display", "distinct",
+     "do", "domain", "double", "drop", "echo", "edit", "else", "end", "entry_point",
+     "escape", "event", "exception", "execute", "exists", "exit", "extern", "external",
+     "extract", "fetch", "file", "filter", "float", "for", "foreign", "found", "free_it",
+     "from", "full", "function", "gdscode", "generator", "gen_id", "global", "goto",
+     "grant", "group", "group_commit_", "group_commit_wait", "having", "help", "hour",
+     "if", "immediate", "in", "inactive", "index", "indicator", "init", "inner", "input",
+     "input_type", "insert", "int", "integer", "into", "is", "isolation", "isql", "join",
+     "key", "lc_messages", "lc_type", "left", "length", "lev", "level", "like", "logfile",
+     "log_buffer_size", "log_buf_size", "long", "manual", "max", "maximum", "maximum_segment",
+     "max_segment", "merge", "message", "min", "minimum", "minute", "module_name", "month",
+     "names", "national", "natural", "nchar", "no", "noauto", "not", "null", "numeric",
+     "num_log_buffers", "num_log_bufs", "octet_length", "of", "on", "only", "open", "option",
+     "or", "order", "outer", "output", "output_type", "overflow", "page", "pagelength",
+     "pages", "page_size", "parameter", "password", "plan", "position", "post_event",
+     "precision", "prepare", "primary", "privileges", "procedure", "protected", "public",
+     "quit", "raw_partitions", "rdb$db_key", "read", "real", "record_version", "recreate",
+     "references", "release", "reserv", "reserving", "restrict", "retain",
+     "return", "returning_values", "returns", "revoke", "right", "role", "rollback",
+     "row_count", "runtime", "savepoint", "schema", "second", "segment", "select",
+     "set", "shadow", "shared", "shell", "show", "singular", "size", "smallint",
+     "snapshot", "some", "sort", "sqlcode", "sqlerror", "sqlwarning", "stability",
+     "starting", "starts", "statement", "static", "statistics", "sub_type", "sum",
+     "suspend", "table", "terminator", "then", "time", "timestamp", "to", "transaction",
+     "translate", "translation", "trigger", "trim", "type", "uncommitted", "union",
+     "unique", "update", "upper", "user", "using", "value", "values", "varchar",
+     "variable", "varying", "version", "view", "wait", "wait_time", "weekday", "when",
+     "whenever", "where", "while", "with", "work", "write", "year", "yearday" ])
+# No colspec overrides: Firebird-specific type rendering is handled by
+# FBTypeCompiler below rather than by type subclasses.
+colspecs = {
+}
-class FBNumeric(sqltypes.Numeric):
- """Handle ``NUMERIC(precision,scale)`` datatype."""
+# Map of Firebird system-catalog type names to SQLAlchemy types; consulted
+# during reflection (see FBDialect.get_columns, which also post-processes
+# INT64/VARYING/CSTRING/TEXT/BLOB entries with precision/length/sub_type).
+# NOTE(review): 'LONG' -> BIGINT and 'QUAD' -> FLOAT look questionable
+# (Firebird's LONG is a 32-bit integer) -- verify against the RDB$ catalog.
+ischema_names = {
+    'SHORT': SMALLINT,
+    'LONG': BIGINT,
+    'QUAD': FLOAT,
+    'FLOAT': FLOAT,
+    'DATE': DATE,
+    'TIME': TIME,
+    'TEXT': TEXT,
+    'INT64': NUMERIC,
+    'DOUBLE': FLOAT,
+    'TIMESTAMP': TIMESTAMP,
+    'VARYING': VARCHAR,
+    'CSTRING': CHAR,
+    'BLOB': BLOB,
+    }
+
+
+# TODO: Boolean type, date conversion types (should be implemented as _FBDateTime, _FBDate, etc.
+# as bind/result functionality is required)
+
+
+class FBTypeCompiler(compiler.GenericTypeCompiler):
+    """Render column type DDL with Firebird-specific spellings."""
+
+    def visit_datetime(self, type_):
+        # Firebird has no DATETIME keyword; generic DateTime renders as
+        # TIMESTAMP.
+        return self.visit_TIMESTAMP(type_)
+
+    def visit_TEXT(self, type_):
+        # Unbounded text maps to a textual BLOB (sub_type 1).
+        return "BLOB SUB_TYPE 1"
+
+    def visit_BLOB(self, type_):
+        # Binary data maps to a plain BLOB (sub_type 0).
+        return "BLOB SUB_TYPE 0"
- def get_col_spec(self):
- if self.precision is None:
- return "NUMERIC"
- else:
- return "NUMERIC(%(precision)s, %(scale)s)" % { 'precision': self.precision,
- 'scale' : self.scale }
+class FBCompiler(sql.compiler.SQLCompiler):
+ """Firebird specific idiosincrasies"""
- def bind_processor(self, dialect):
- return None
+    def visit_mod(self, binary, **kw):
+        """Render ``a % b`` as the function call ``mod(a, b)``."""
+        # Firebird lacks a builtin modulo operator, but there is
+        # an equivalent function in the ib_udf library.
+        return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
- def result_processor(self, dialect):
- if self.asdecimal:
- return None
+ def visit_alias(self, alias, asfrom=False, **kwargs):
+ if self.dialect._version_two:
+ return super(FBCompiler, self).visit_alias(alias, asfrom=asfrom, **kwargs)
else:
- def process(value):
- if isinstance(value, decimal.Decimal):
- return float(value)
- else:
- return value
- return process
-
-
-class FBFloat(sqltypes.Float):
- """Handle ``FLOAT(precision)`` datatype."""
+ # Override to not use the AS keyword which FB 1.5 does not like
+ if asfrom:
+ alias_name = isinstance(alias.name, expression._generated_label) and \
+ self._truncated_identifier("alias", alias.name) or alias.name
+
+ return self.process(alias.original, asfrom=asfrom, **kwargs) + " " + \
+ self.preparer.format_alias(alias, alias_name)
+ else:
+ return self.process(alias.original, **kwargs)
- def get_col_spec(self):
- if not self.precision:
- return "FLOAT"
+ def visit_substring_func(self, func, **kw):
+ s = self.process(func.clauses.clauses[0])
+ start = self.process(func.clauses.clauses[1])
+ if len(func.clauses.clauses) > 2:
+ length = self.process(func.clauses.clauses[2])
+ return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
- return "FLOAT(%(precision)s)" % {'precision': self.precision}
-
-
-class FBInteger(sqltypes.Integer):
- """Handle ``INTEGER`` datatype."""
-
- def get_col_spec(self):
- return "INTEGER"
-
+ return "SUBSTRING(%s FROM %s)" % (s, start)
-class FBSmallInteger(sqltypes.SmallInteger):
- """Handle ``SMALLINT`` datatype."""
+ def visit_length_func(self, function, **kw):
+ if self.dialect._version_two:
+ return "char_length" + self.function_argspec(function)
+ else:
+ return "strlen" + self.function_argspec(function)
- def get_col_spec(self):
- return "SMALLINT"
+ visit_char_length_func = visit_length_func
+    def function_argspec(self, func):
+        """Render a function's argument list; for zero-argument functions
+        nothing (not even empty parentheses) is emitted.
+        """
+        if func.clauses:
+            return self.process(func.clause_expr)
+        else:
+            return ""
-class FBDateTime(sqltypes.DateTime):
- """Handle ``TIMESTAMP`` datatype."""
+    def default_from(self):
+        # Firebird requires a FROM clause; selects with no explicit table
+        # are issued against the system relation rdb$database.
+        return " FROM rdb$database"
- def get_col_spec(self):
- return "TIMESTAMP"
+    def visit_sequence(self, seq):
+        """Render a sequence (generator) fetch as ``gen_id(<name>, 1)``."""
+        return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
- def bind_processor(self, dialect):
- def process(value):
- if value is None or isinstance(value, datetime.datetime):
- return value
- else:
- return datetime.datetime(year=value.year,
- month=value.month,
- day=value.day)
- return process
+    def get_select_precolumns(self, select):
+        """Called when building a ``SELECT`` statement; position is just
+        before the column list.  Firebird puts the limit and offset
+        (``FIRST``/``SKIP``) right after the ``SELECT`` keyword.
+        """
+        result = ""
+        if select._limit:
+            result += "FIRST %d " % select._limit
+        if select._offset:
+            result += "SKIP %d " % select._offset
+        if select._distinct:
+            result += "DISTINCT "
+        return result
-class FBDate(sqltypes.DateTime):
- """Handle ``DATE`` datatype."""
+ def limit_clause(self, select):
+ """Already taken care of in the `get_select_precolumns` method."""
- def get_col_spec(self):
- return "DATE"
+ return ""
-class FBTime(sqltypes.Time):
- """Handle ``TIME`` datatype."""
+    def _append_returning(self, text, stmt):
+        """Append a ``RETURNING <cols>`` clause, built from the statement's
+        ``firebird_returning`` keyword argument, to the compiled *text*.
+        """
+        returning_cols = stmt.kwargs["firebird_returning"]
+        def flatten_columnlist(collist):
+            # Expand any selectable (e.g. a whole table) into its columns.
+            for c in collist:
+                if isinstance(c, sql.expression.Selectable):
+                    for co in c.columns:
+                        yield co
+                else:
+                    yield c
+        columns = [self.process(c, within_columns_clause=True)
+                   for c in flatten_columnlist(returning_cols)]
+        text += ' RETURNING ' + ', '.join(columns)
+        return text
- def get_col_spec(self):
- return "TIME"
+    def visit_update(self, update_stmt):
+        """Compile UPDATE, appending RETURNING if ``firebird_returning`` is set."""
+        text = super(FBCompiler, self).visit_update(update_stmt)
+        if "firebird_returning" in update_stmt.kwargs:
+            return self._append_returning(text, update_stmt)
+        else:
+            return text
+    def visit_insert(self, insert_stmt):
+        """Compile INSERT, appending RETURNING if ``firebird_returning`` is set."""
+        text = super(FBCompiler, self).visit_insert(insert_stmt)
+        if "firebird_returning" in insert_stmt.kwargs:
+            return self._append_returning(text, insert_stmt)
+        else:
+            return text
-class FBText(sqltypes.Text):
- """Handle ``BLOB SUB_TYPE 1`` datatype (aka *textual* blob)."""
+    def visit_delete(self, delete_stmt):
+        """Compile DELETE, appending RETURNING if ``firebird_returning`` is set."""
+        text = super(FBCompiler, self).visit_delete(delete_stmt)
+        if "firebird_returning" in delete_stmt.kwargs:
+            return self._append_returning(text, delete_stmt)
+        else:
+            return text
- def get_col_spec(self):
- return "BLOB SUB_TYPE 1"
+class FBDDLCompiler(sql.compiler.DDLCompiler):
+ """Firebird syntactic idiosincrasies"""
-class FBString(sqltypes.String):
- """Handle ``VARCHAR(length)`` datatype."""
+ def visit_create_sequence(self, create):
+ """Generate a ``CREATE GENERATOR`` statement for the sequence."""
- def get_col_spec(self):
- if self.length:
- return "VARCHAR(%(length)s)" % {'length' : self.length}
+ if self.dialect._version_two:
+ return "CREATE SEQUENCE %s" % self.preparer.format_sequence(create.element)
else:
- return "BLOB SUB_TYPE 1"
-
+ return "CREATE GENERATOR %s" % self.preparer.format_sequence(create.element)
-class FBChar(sqltypes.CHAR):
- """Handle ``CHAR(length)`` datatype."""
+ def visit_drop_sequence(self, drop):
+ """Generate a ``DROP GENERATOR`` statement for the sequence."""
- def get_col_spec(self):
- if self.length:
- return "CHAR(%(length)s)" % {'length' : self.length}
+ if self.dialect._version_two:
+ return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
else:
- return "BLOB SUB_TYPE 1"
+ return "DROP GENERATOR %s" % self.preparer.format_sequence(drop.element)
-class FBBinary(sqltypes.Binary):
- """Handle ``BLOB SUB_TYPE 0`` datatype (aka *binary* blob)."""
-
- def get_col_spec(self):
- return "BLOB SUB_TYPE 0"
-
-
-class FBBoolean(sqltypes.Boolean):
- """Handle boolean values as a ``SMALLINT`` datatype."""
-
- def get_col_spec(self):
- return "SMALLINT"
+class FBDefaultRunner(base.DefaultRunner):
+ """Firebird specific idiosincrasies"""
+ def visit_sequence(self, seq):
+ """Get the next value from the sequence using ``gen_id()``."""
-colspecs = {
- sqltypes.Integer : FBInteger,
- sqltypes.SmallInteger : FBSmallInteger,
- sqltypes.Numeric : FBNumeric,
- sqltypes.Float : FBFloat,
- sqltypes.DateTime : FBDateTime,
- sqltypes.Date : FBDate,
- sqltypes.Time : FBTime,
- sqltypes.String : FBString,
- sqltypes.Binary : FBBinary,
- sqltypes.Boolean : FBBoolean,
- sqltypes.Text : FBText,
- sqltypes.CHAR: FBChar,
-}
+ return self.execute_string("SELECT gen_id(%s, 1) FROM rdb$database" % \
+ self.dialect.identifier_preparer.format_sequence(seq))
+class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
+ """Install Firebird specific reserved words."""
-ischema_names = {
- 'SHORT': lambda r: FBSmallInteger(),
- 'LONG': lambda r: FBInteger(),
- 'QUAD': lambda r: FBFloat(),
- 'FLOAT': lambda r: FBFloat(),
- 'DATE': lambda r: FBDate(),
- 'TIME': lambda r: FBTime(),
- 'TEXT': lambda r: FBString(r['flen']),
- 'INT64': lambda r: FBNumeric(precision=r['fprec'], scale=r['fscale'] * -1), # This generically handles NUMERIC()
- 'DOUBLE': lambda r: FBFloat(),
- 'TIMESTAMP': lambda r: FBDateTime(),
- 'VARYING': lambda r: FBString(r['flen']),
- 'CSTRING': lambda r: FBChar(r['flen']),
- 'BLOB': lambda r: r['stype']==1 and FBText() or FBBinary()
- }
-
-RETURNING_KW_NAME = 'firebird_returning'
-
-class FBExecutionContext(default.DefaultExecutionContext):
- pass
+ reserved_words = RESERVED_WORDS
+ def __init__(self, dialect):
+ super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
+
name = 'firebird'
- supports_sane_rowcount = False
- supports_sane_multi_rowcount = False
+
max_identifier_length = 31
+ supports_sequences = True
+ sequences_optional = False
+ supports_default_values = True
+ supports_empty_insert = False
preexecute_pk_sequences = True
supports_pk_autoincrement = False
-
- def __init__(self, type_conv=200, concurrency_level=1, **kwargs):
- default.DefaultDialect.__init__(self, **kwargs)
-
- self.type_conv = type_conv
- self.concurrency_level = concurrency_level
-
- def dbapi(cls):
- import kinterbasdb
- return kinterbasdb
- dbapi = classmethod(dbapi)
-
- def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if opts.get('port'):
- opts['host'] = "%s/%s" % (opts['host'], opts['port'])
- del opts['port']
- opts.update(url.query)
-
- type_conv = opts.pop('type_conv', self.type_conv)
- concurrency_level = opts.pop('concurrency_level', self.concurrency_level)
- global _initialized_kb
- if not _initialized_kb and self.dbapi is not None:
- _initialized_kb = True
- self.dbapi.init(type_conv=type_conv, concurrency_level=concurrency_level)
- return ([], opts)
-
- def type_descriptor(self, typeobj):
- return sqltypes.adapt_type(typeobj, colspecs)
-
- def server_version_info(self, connection):
- """Get the version of the Firebird server used by a connection.
-
- Returns a tuple of (`major`, `minor`, `build`), three integers
- representing the version of the attached server.
- """
-
- # This is the simpler approach (the other uses the services api),
- # that for backward compatibility reasons returns a string like
- # LI-V6.3.3.12981 Firebird 2.0
- # where the first version is a fake one resembling the old
- # Interbase signature. This is more than enough for our purposes,
- # as this is mainly (only?) used by the testsuite.
-
- from re import match
-
- fbconn = connection.connection.connection
- version = fbconn.server_version
- m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+) \w+ (\d+)\.(\d+)', version)
- if not m:
- raise AssertionError("Could not determine version from string '%s'" % version)
- return tuple([int(x) for x in m.group(5, 6, 4)])
-
- def _normalize_name(self, name):
- """Convert the name to lowercase if it is possible"""
-
+ requires_name_normalize = True
+
+ statement_compiler = FBCompiler
+ ddl_compiler = FBDDLCompiler
+ defaultrunner = FBDefaultRunner
+ preparer = FBIdentifierPreparer
+ type_compiler = FBTypeCompiler
+
+ colspecs = colspecs
+ ischema_names = ischema_names
+
+    # Defaults to dialect version 3; the actual server version is
+    # autodetected on first connect (see initialize()).
+ _version_two = True
+
+ def initialize(self, connection):
+ super(FBDialect, self).initialize(connection)
+ self._version_two = self.server_version_info > (2, )
+ if not self._version_two:
+ # TODO: whatever other pre < 2.0 stuff goes here
+ self.ischema_names = ischema_names.copy()
+ self.ischema_names['TIMESTAMP'] = sqltypes.DATE
+ self.colspecs = {
+ sqltypes.DateTime :sqltypes.DATE
+ }
+
+ def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
# that is padded with spaces
name = name and name.rstrip()
if name is None:
return None
- elif name.upper() == name and not self.identifier_preparer._requires_quotes(name.lower()):
+ elif name.upper() == name and \
+ not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
- def _denormalize_name(self, name):
- """Revert a *normalized* name to its uppercase equivalent"""
-
+ def denormalize_name(self, name):
if name is None:
return None
- elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
+ elif name.lower() == name and \
+ not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper()
else:
return name
- def table_names(self, connection, schema):
- """Return a list of *normalized* table names omitting system relations."""
-
- s = """
- SELECT r.rdb$relation_name
- FROM rdb$relations r
- WHERE r.rdb$system_flag=0
- """
- return [self._normalize_name(row[0]) for row in connection.execute(s)]
-
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring the `schema`."""
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
- c = connection.execute(tblqry, [self._denormalize_name(table_name)])
- row = c.fetchone()
- if row is not None:
- return True
- else:
- return False
+ c = connection.execute(tblqry, [self.denormalize_name(table_name)])
+ return c.first() is not None
def has_sequence(self, connection, sequence_name):
"""Return ``True`` if the given sequence (generator) exists."""
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
- c = connection.execute(genqry, [self._denormalize_name(sequence_name)])
- row = c.fetchone()
- if row is not None:
- return True
- else:
- return False
-
- def is_disconnect(self, e):
- if isinstance(e, self.dbapi.OperationalError):
- return 'Unable to complete network request to host' in str(e)
- elif isinstance(e, self.dbapi.ProgrammingError):
- msg = str(e)
- return ('Invalid connection state' in msg or
- 'Invalid cursor state' in msg)
- else:
- return False
+ c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
+ return c.first() is not None
- @reflection.cache
- def get_table_names(self, connection, schema=None, **kw):
+ def table_names(self, connection, schema):
s = """
SELECT DISTINCT rdb$relation_name
FROM rdb$relation_fields WHERE
rdb$system_flag=0 AND rdb$view_context IS NULL
"""
- return [self._normalize_name(row[0]) for row in connection.execute(s)]
+ return [self.normalize_name(row[0]) for row in connection.execute(s)]
+    @reflection.cache
+    def get_table_names(self, connection, schema=None, **kw):
+        """Reflection API entry point; delegates to table_names()."""
+        return self.table_names(connection, schema)
+
+    @reflection.cache
+    def get_view_names(self, connection, schema=None, **kw):
+        """Return the normalized names of all views in the database."""
+        s = """
+        SELECT distinct rdb$view_name
+        FROM rdb$view_relations
+        """
+        return [self.normalize_name(row[0]) for row in connection.execute(s)]
+
+    @reflection.cache
+    def get_view_definition(self, connection, view_name, schema=None, **kw):
+        """Return the stored source text of *view_name*, or None if the
+        relation does not exist (or has no view source).
+        """
+        # NOTE(review): the trailing ';' inside the query text may be
+        # rejected by some drivers -- confirm against kinterbasdb.
+        qry = """
+        SELECT rdb$view_source AS view_source
+        FROM rdb$relations
+        WHERE rdb$relation_name=?;
+        """
+        rp = connection.execute(qry, [self.denormalize_name(view_name)])
+        row = rp.first()
+        if row:
+            return row['view_source']
+        else:
+            return None
+
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
- tablename = self._denormalize_name(table.name)
+ tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
- pkfields = [self._normalize_name(r['fname']) for r in c.fetchall()]
+ pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
return pkfields
@reflection.cache
def get_column_sequence(self, connection, table_name, column_name,
schema=None, **kw):
- tablename = self._denormalize_name(table_name)
- colname = self._denormalize_name(column_name)
+ tablename = self.denormalize_name(table_name)
+ colname = self.denormalize_name(column_name)
# Heuristic-query to determine the generator associated to a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
genc = connection.execute(genqry, [tablename, colname])
genr = genc.fetchone()
if genr is not None:
- return dict(name=self._normalize_name(genr['fgenerator']))
+ return dict(name=self.normalize_name(genr['fgenerator']))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
- tablename = self._denormalize_name(table_name)
+ tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.execute(tblqry, [tablename])
cols = []
row = c.fetchone()
if row is None:
break
- name = self._normalize_name(row['fname'])
+ name = self.normalize_name(row['fname'])
# get the data type
- coltype = ischema_names.get(row['ftype'].rstrip())
+
+ colspec = row['ftype'].rstrip()
+ coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn("Did not recognize type '%s' of column '%s'" %
- (str(row['ftype']), name))
+ (colspec, name))
coltype = sqltypes.NULLTYPE
+ elif colspec == 'INT64':
+ coltype = coltype(precision=row['fprec'], scale=row['fscale'] * -1)
+ elif colspec in ('VARYING', 'CSTRING'):
+ coltype = coltype(row['flen'])
+ elif colspec == 'TEXT':
+ coltype = TEXT(row['flen'])
+ elif colspec == 'BLOB':
+ if row['stype'] == 1:
+ coltype = TEXT()
+ else:
+ coltype = BLOB()
else:
coltype = coltype(row)
-
+
# does it have a default value?
defvalue = None
if row['fdefault'] is not None:
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON cse.rdb$index_name=ix1.rdb$index_name
- JOIN rdb$index_segments se ON se.rdb$index_name=ix2.rdb$index_name AND se.rdb$field_position=cse.rdb$field_position
+ JOIN rdb$index_segments se ON se.rdb$index_name=ix2.rdb$index_name AND
+ se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
- tablename = self._denormalize_name(table_name)
- # get the foreign keys
+ tablename = self.denormalize_name(table_name)
+
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
- fks = {}
- fkeys = []
- while True:
- row = c.fetchone()
- if not row:
- break
- cname = self._normalize_name(row['cname'])
- if cname in fks:
- fk = fks[cname]
- else:
- fk = {
- 'name' : cname,
- 'constrained_columns' : [],
- 'referred_schema' : None,
- 'referred_table' : None,
- 'referred_columns' : []
- }
- fks[cname] = fk
- fkeys.append(fk)
- fk['referred_table'] = self._normalize_name(row['targetrname'])
- fk['constrained_columns'].append(self._normalize_name(row['fname']))
+ fks = util.defaultdict(lambda:{
+ 'name' : None,
+ 'constrained_columns' : [],
+ 'referred_schema' : None,
+ 'referred_table' : None,
+ 'referred_columns' : []
+ })
+
+ for row in c:
+ cname = self.normalize_name(row['cname'])
+ fk = fks[cname]
+ if not fk['name']:
+ fk['name'] = cname
+ fk['referred_table'] = self.normalize_name(row['targetrname'])
+ fk['constrained_columns'].append(self.normalize_name(row['fname']))
fk['referred_columns'].append(
- self._normalize_name(row['targetfname']))
- return fkeys
-
- def reflecttable(self, connection, table, include_columns):
-
- # get primary key fields
- pkfields = self.get_primary_keys(connection, table.name)
-
- found_table = False
- for col_d in self.get_columns(connection, table.name):
- found_table = True
-
- name = col_d.get('name')
- defvalue = col_d.get('default')
- nullable = col_d.get('nullable')
- coltype = col_d.get('type')
-
- if include_columns and name not in include_columns:
- continue
- args = [name]
-
- kw = {}
- args.append(coltype)
-
- # is it a primary key?
- kw['primary_key'] = name in pkfields
-
- # is it nullable?
- kw['nullable'] = nullable
-
- # does it have a default value?
- if defvalue:
- args.append(sa_schema.DefaultClause(sql.text(defvalue)))
-
- col = sa_schema.Column(*args, **kw)
- if kw['primary_key']:
- # if the PK is a single field, try to see if its linked to
- # a sequence thru a trigger
- if len(pkfields)==1:
- sequence_name = self.get_column_sequence(connection,
- table.name, name)
- if sequence_name is not None:
- col.sequence = sa_schema.Sequence(sequence_name)
- table.append_column(col)
-
- if not found_table:
- raise exc.NoSuchTableError(table.name)
-
- # get the foreign keys
- for fkey_d in self.get_foreign_keys(connection, table.name):
- cname = fkey_d['name']
- constrained_columns = fkey_d['constrained_columns']
- rname = fkey_d['referred_table']
- referred_columns = fkey_d['referred_columns']
-
- sa_schema.Table(rname, table.metadata, autoload=True, autoload_with=connection)
- refspec = ['.'.join(c) for c in \
- zip(constrained_columns, referred_columns)]
- table.append_constraint(sa_schema.ForeignKeyConstraint(
- constrained_columns, refspec, name=cname, link_to_name=True))
+ self.normalize_name(row['targetfname']))
+ return fks.values()
+    @reflection.cache
+    def get_indexes(self, connection, table_name, schema=None, **kw):
+        """Return reflected index information for *table_name*.
+
+        Indexes that back constraints are excluded: foreign-key indexes via
+        ``rdb$foreign_key IS NULL`` and constraint-owned indexes via the
+        LEFT OUTER JOIN / ``RDB$CONSTRAINT_TYPE IS NULL`` test.
+        """
+        qry = """
+        SELECT
+            ix.rdb$index_name AS index_name,
+            ix.rdb$unique_flag AS unique_flag,
+            ic.rdb$field_name AS field_name
+
+        FROM rdb$indices ix JOIN rdb$index_segments ic
+            ON ix.rdb$index_name=ic.rdb$index_name
+
+        LEFT OUTER JOIN RDB$RELATION_CONSTRAINTS
+            ON RDB$RELATION_CONSTRAINTS.RDB$INDEX_NAME = ic.RDB$INDEX_NAME
+
+        WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
+        AND RDB$RELATION_CONSTRAINTS.RDB$CONSTRAINT_TYPE IS NULL
+        ORDER BY index_name, field_name
+        """
+        c = connection.execute(qry, [self.denormalize_name(table_name)])
+
+        # Group segment rows by index name; 'name'/'unique' are set once per
+        # index, then each row contributes one column name (in position order).
+        indexes = util.defaultdict(dict)
+        for row in c:
+            indexrec = indexes[row['index_name']]
+            if 'name' not in indexrec:
+                indexrec['name'] = self.normalize_name(row['index_name'])
+                indexrec['column_names'] = []
+                indexrec['unique'] = bool(row['unique_flag'])
+
+            indexrec['column_names'].append(self.normalize_name(row['field_name']))
+
+        return indexes.values()
+
def do_execute(self, cursor, statement, parameters, **kwargs):
# kinterbase does not accept a None, but wants an empty list
# when there are no arguments.
connection.commit(True)
-class FBCompiler(sql.compiler.SQLCompiler):
- """Firebird specific idiosincrasies"""
-
- def visit_mod(self, binary, **kw):
- # Firebird lacks a builtin modulo operator, but there is
- # an equivalent function in the ib_udf library.
- return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
-
- def visit_alias(self, alias, asfrom=False, **kwargs):
- # Override to not use the AS keyword which FB 1.5 does not like
- if asfrom:
- return self.process(alias.original, asfrom=True, **kwargs) + " " + self.preparer.format_alias(alias, self._anonymize(alias.name))
- else:
- return self.process(alias.original, **kwargs)
-
- def visit_substring_func(self, func, **kw):
- s = self.process(func.clauses.clauses[0])
- start = self.process(func.clauses.clauses[1])
- if len(func.clauses.clauses) > 2:
- length = self.process(func.clauses.clauses[2])
- return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
- else:
- return "SUBSTRING(%s FROM %s)" % (s, start)
-
- # TODO: auto-detect this or something
- LENGTH_FUNCTION_NAME = 'char_length'
-
- def visit_length_func(self, function, **kw):
- return self.LENGTH_FUNCTION_NAME + self.function_argspec(function)
-
- def visit_char_length_func(self, function, **kw):
- return self.LENGTH_FUNCTION_NAME + self.function_argspec(function)
-
- def function_argspec(self, func):
- if func.clauses:
- return self.process(func.clause_expr)
- else:
- return ""
-
- def default_from(self):
- return " FROM rdb$database"
-
- def visit_sequence(self, seq):
- return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
-
- def get_select_precolumns(self, select):
- """Called when building a ``SELECT`` statement, position is just
- before column list Firebird puts the limit and offset right
- after the ``SELECT``...
- """
-
- result = ""
- if select._limit:
- result += "FIRST %d " % select._limit
- if select._offset:
- result +="SKIP %d " % select._offset
- if select._distinct:
- result += "DISTINCT "
- return result
-
- def limit_clause(self, select):
- """Already taken care of in the `get_select_precolumns` method."""
-
- return ""
-
-
- def _append_returning(self, text, stmt):
- returning_cols = stmt.kwargs[RETURNING_KW_NAME]
- def flatten_columnlist(collist):
- for c in collist:
- if isinstance(c, sql.expression.Selectable):
- for co in c.columns:
- yield co
- else:
- yield c
- columns = [self.process(c, within_columns_clause=True)
- for c in flatten_columnlist(returning_cols)]
- text += ' RETURNING ' + ', '.join(columns)
- return text
-
- def visit_update(self, update_stmt):
- text = super(FBCompiler, self).visit_update(update_stmt)
- if RETURNING_KW_NAME in update_stmt.kwargs:
- return self._append_returning(text, update_stmt)
- else:
- return text
-
- def visit_insert(self, insert_stmt):
- text = super(FBCompiler, self).visit_insert(insert_stmt)
- if RETURNING_KW_NAME in insert_stmt.kwargs:
- return self._append_returning(text, insert_stmt)
- else:
- return text
-
- def visit_delete(self, delete_stmt):
- text = super(FBCompiler, self).visit_delete(delete_stmt)
- if RETURNING_KW_NAME in delete_stmt.kwargs:
- return self._append_returning(text, delete_stmt)
- else:
- return text
-
-
-class FBSchemaGenerator(sql.compiler.SchemaGenerator):
- """Firebird syntactic idiosincrasies"""
-
- def visit_sequence(self, sequence):
- """Generate a ``CREATE GENERATOR`` statement for the sequence."""
-
- if not self.checkfirst or not self.dialect.has_sequence(self.connection, sequence.name):
- self.append("CREATE GENERATOR %s" % self.preparer.format_sequence(sequence))
- self.execute()
-
-
-class FBSchemaDropper(sql.compiler.SchemaDropper):
- """Firebird syntactic idiosincrasies"""
-
- def visit_sequence(self, sequence):
- """Generate a ``DROP GENERATOR`` statement for the sequence."""
-
- if not self.checkfirst or self.dialect.has_sequence(self.connection, sequence.name):
- self.append("DROP GENERATOR %s" % self.preparer.format_sequence(sequence))
- self.execute()
-
-
-class FBDefaultRunner(base.DefaultRunner):
- """Firebird specific idiosincrasies"""
-
- def visit_sequence(self, seq):
- """Get the next value from the sequence using ``gen_id()``."""
-
- return self.execute_string("SELECT gen_id(%s, 1) FROM rdb$database" % \
- self.dialect.identifier_preparer.format_sequence(seq))
-
-
-RESERVED_WORDS = set(
- ["action", "active", "add", "admin", "after", "all", "alter", "and", "any",
- "as", "asc", "ascending", "at", "auto", "autoddl", "avg", "based", "basename",
- "base_name", "before", "begin", "between", "bigint", "blob", "blobedit", "buffer",
- "by", "cache", "cascade", "case", "cast", "char", "character", "character_length",
- "char_length", "check", "check_point_len", "check_point_length", "close", "collate",
- "collation", "column", "commit", "committed", "compiletime", "computed", "conditional",
- "connect", "constraint", "containing", "continue", "count", "create", "cstring",
- "current", "current_connection", "current_date", "current_role", "current_time",
- "current_timestamp", "current_transaction", "current_user", "cursor", "database",
- "date", "day", "db_key", "debug", "dec", "decimal", "declare", "default", "delete",
- "desc", "descending", "describe", "descriptor", "disconnect", "display", "distinct",
- "do", "domain", "double", "drop", "echo", "edit", "else", "end", "entry_point",
- "escape", "event", "exception", "execute", "exists", "exit", "extern", "external",
- "extract", "fetch", "file", "filter", "float", "for", "foreign", "found", "free_it",
- "from", "full", "function", "gdscode", "generator", "gen_id", "global", "goto",
- "grant", "group", "group_commit_", "group_commit_wait", "having", "help", "hour",
- "if", "immediate", "in", "inactive", "index", "indicator", "init", "inner", "input",
- "input_type", "insert", "int", "integer", "into", "is", "isolation", "isql", "join",
- "key", "lc_messages", "lc_type", "left", "length", "lev", "level", "like", "logfile",
- "log_buffer_size", "log_buf_size", "long", "manual", "max", "maximum", "maximum_segment",
- "max_segment", "merge", "message", "min", "minimum", "minute", "module_name", "month",
- "names", "national", "natural", "nchar", "no", "noauto", "not", "null", "numeric",
- "num_log_buffers", "num_log_bufs", "octet_length", "of", "on", "only", "open", "option",
- "or", "order", "outer", "output", "output_type", "overflow", "page", "pagelength",
- "pages", "page_size", "parameter", "password", "plan", "position", "post_event",
- "precision", "prepare", "primary", "privileges", "procedure", "protected", "public",
- "quit", "raw_partitions", "rdb$db_key", "read", "real", "record_version", "recreate",
- "references", "release", "release", "reserv", "reserving", "restrict", "retain",
- "return", "returning_values", "returns", "revoke", "right", "role", "rollback",
- "row_count", "runtime", "savepoint", "schema", "second", "segment", "select",
- "set", "shadow", "shared", "shell", "show", "singular", "size", "smallint",
- "snapshot", "some", "sort", "sqlcode", "sqlerror", "sqlwarning", "stability",
- "starting", "starts", "statement", "static", "statistics", "sub_type", "sum",
- "suspend", "table", "terminator", "then", "time", "timestamp", "to", "transaction",
- "translate", "translation", "trigger", "trim", "type", "uncommitted", "union",
- "unique", "update", "upper", "user", "using", "value", "values", "varchar",
- "variable", "varying", "version", "view", "wait", "wait_time", "weekday", "when",
- "whenever", "where", "while", "with", "work", "write", "year", "yearday" ])
-
-
-class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
- """Install Firebird specific reserved words."""
-
- reserved_words = RESERVED_WORDS
-
- def __init__(self, dialect):
- super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
-
-
-dialect = FBDialect
-dialect.statement_compiler = FBCompiler
-dialect.schemagenerator = FBSchemaGenerator
-dialect.schemadropper = FBSchemaDropper
-dialect.defaultrunner = FBDefaultRunner
-dialect.preparer = FBIdentifierPreparer
-dialect.execution_ctx_cls = FBExecutionContext