from alembic.environment import EnvironmentContext
from alembic import util
# create proxy functions for
# each method on the EnvironmentContext class.
util.create_module_class_proxy(EnvironmentContext, globals(), locals())
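# A rough sketch of what the generated proxies amount to (an illustration,
# not the actual generated source): each module-level function forwards to
# whichever object is currently installed, which is how
# ``from alembic import context`` exposes EnvironmentContext methods at
# module scope.

_proxy = None

def _install_proxy(obj):
    # called when an EnvironmentContext is entered
    global _proxy
    _proxy = obj

def is_offline_mode():
    # forwards to the installed EnvironmentContext, if any
    return _proxy.is_offline_mode()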
class ColumnNullable(AlterColumn):
def __init__(self, name, column_name, nullable, **kw):
super(ColumnNullable, self).__init__(name, column_name,
**kw)
self.nullable = nullable
class ColumnType(AlterColumn):
def __init__(self, name, column_name, type_, **kw):
super(ColumnType, self).__init__(name, column_name,
**kw)
self.type_ = sqltypes.to_instance(type_)
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
- "SET DEFAULT %s" %
+ "SET DEFAULT %s" %
format_server_default(compiler, element.default)
if element.default is not None
else "DROP DEFAULT"
class DefaultImpl(object):
"""Provide the entrypoint for major migration operations,
including database-specific behavioral variances.

While individual SQL/DDL constructs already provide
for database-specific implementations, variances here
allow for entirely different sequences of operations
to take place for a particular migration, such as
SQL Server's special 'IDENTITY_INSERT' step for
bulk inserts.
"""
transactional_ddl = False
command_terminator = ";"
def __init__(self, dialect, connection, as_sql,
transactional_ddl, output_buffer,
context_opts):
self.dialect = dialect
def bind(self):
return self.connection
def _exec(self, construct, execution_options=None,
multiparams=(),
params=util.immutabledict()):
if isinstance(construct, basestring):
construct = text(construct)
def execute(self, sql, execution_options=None):
self._exec(sql, execution_options)
def alter_column(self, table_name, column_name,
nullable=None,
server_default=False,
name=None,
):
if nullable is not None:
self._exec(base.ColumnNullable(table_name, column_name,
nullable, schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
self._exec(schema.DropConstraint(const))
def rename_table(self, old_table_name, new_table_name, schema=None):
self._exec(base.RenameTable(old_table_name,
new_table_name, schema=schema))
def create_table(self, table):
else:
return True
def compare_server_default(self, inspector_column,
metadata_column,
rendered_metadata_default):
conn_col_default = inspector_column['default']
return conn_col_default != rendered_metadata_default
def start_migrations(self):
"""A hook called when :meth:`.EnvironmentContext.run_migrations`
is called.
-
+
Implementations can set up per-migration-run state here.
-
+
"""
def emit_begin(self):
"""Emit the string ``BEGIN``, or the backend-specific
equivalent, on the current connection context.

This is used in offline mode and typically
via :meth:`.EnvironmentContext.begin_transaction`.

"""
self.static_output("BEGIN" + self.command_terminator)
def emit_commit(self):
"""Emit the string ``COMMIT``, or the backend-specific
equivalent, on the current connection context.

This is used in offline mode and typically
via :meth:`.EnvironmentContext.begin_transaction`.

"""
self.static_output("COMMIT" + self.command_terminator)
def __init__(self, *arg, **kw):
super(MSSQLImpl, self).__init__(*arg, **kw)
self.batch_separator = self.context_opts.get(
- "mssql_batch_separator",
+ "mssql_batch_separator",
self.batch_separator)
def _exec(self, construct, *args, **kw):
def emit_begin(self):
self.static_output("BEGIN TRANSACTION" + self.command_terminator)
def alter_column(self, table_name, column_name,
nullable=None,
server_default=False,
name=None,
"existing_type or a new type_ be passed.")
super(MSSQLImpl, self).alter_column(
table_name, column_name,
nullable=nullable,
type_=type_,
schema=schema,
if existing_server_default is not False or \
server_default is None:
self._exec(
_exec_drop_col_constraint(self,
table_name, column_name,
'sys.default_constraints')
)
if server_default is not None:
super(MSSQLImpl, self).alter_column(
table_name, column_name,
schema=schema,
server_default=server_default)
if name is not None:
super(MSSQLImpl, self).alter_column(
table_name, column_name,
schema=schema,
name=name)
def bulk_insert(self, table, rows):
if self.as_sql:
self._exec(
- "SET IDENTITY_INSERT %s ON" %
+ "SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(table)
)
super(MSSQLImpl, self).bulk_insert(table, rows)
self._exec(
- "SET IDENTITY_INSERT %s OFF" %
+ "SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.format_table(table)
)
else:
drop_default = kw.pop('mssql_drop_default', False)
if drop_default:
self._exec(
_exec_drop_col_constraint(self,
table_name, column,
'sys.default_constraints')
)
drop_check = kw.pop('mssql_drop_check', False)
if drop_check:
self._exec(
_exec_drop_col_constraint(self,
table_name, column,
'sys.check_constraints')
)
super(MSSQLImpl, self).drop_column(table_name, column)
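# Usage sketch for the flags consumed above (the table and column names
# are hypothetical): a migration script can ask for the MSSQL default and
# CHECK constraints to be dropped along with the column.
def upgrade():
    op.drop_column('account', 'old_col',
                   mssql_drop_default=True,
                   mssql_drop_check=True)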
class MySQLImpl(DefaultImpl):
__dialect__ = 'mysql'
def alter_column(self, table_name, column_name,
nullable=None,
server_default=False,
name=None,
table_name, column_name,
schema=schema,
newname=name if name is not None else column_name,
nullable=nullable if nullable is not None else
existing_nullable if existing_nullable is not None
else True,
type_=type_ if type_ is not None else existing_type,
@compiles(schema.DropConstraint, "mysql")
def _mysql_drop_constraint(element, compiler, **kw):
- """Redefine SQLAlchemy's drop constraint to
+ """Redefine SQLAlchemy's drop constraint to
raise errors for invalid constraint type."""
constraint = element.element
def __init__(self, *arg, **kw):
super(OracleImpl, self).__init__(*arg, **kw)
self.batch_separator = self.context_opts.get(
- "oracle_batch_separator",
+ "oracle_batch_separator",
self.batch_separator)
def _exec(self, construct, *args, **kw):
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
- "DEFAULT %s" %
+ "DEFAULT %s" %
format_server_default(compiler, element.default)
if element.default is not None
else "DEFAULT NULL"
__dialect__ = 'postgresql'
transactional_ddl = True
def compare_server_default(self, inspector_column,
metadata_column,
rendered_metadata_default):
# don't do defaults for SERIAL columns
class EnvironmentContext(object):
"""Represent the state made available to an ``env.py`` script.
-
+
:class:`.EnvironmentContext` is normally instantiated
by the commands present in the :mod:`alembic.command`
module. From within an ``env.py`` script, the current
- :class:`.EnvironmentContext` is available via the
+ :class:`.EnvironmentContext` is available via the
``alembic.context`` datamember.
-
+
:class:`.EnvironmentContext` is also a Python context
manager, that is, is intended to be used using the
``with:`` statement. A typical use of :class:`.EnvironmentContext`::
from alembic.config import Config
from alembic.script import ScriptDirectory
-
+
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
-
+
def my_function(rev, context):
'''do something with revision "rev", which
- will be the current database revision,
+ will be the current database revision,
and "context", which is the MigrationContext
that the env.py will create'''
-
+
with EnvironmentContext(
config,
script,
tag = "sometag"
):
script.run_env()
-
+
The above script will invoke the ``env.py`` script
within the migration environment. If and when ``env.py``
calls :meth:`.MigrationContext.run_migrations`, the
- ``my_function()`` function above will be called
+ ``my_function()`` function above will be called
by the :class:`.MigrationContext`, given the context
itself as well as the current revision in the database.
-
+
.. note::
For most API usages other than full blown
invocation of migration scripts, the :class:`.MigrationContext`
and :class:`.ScriptDirectory` objects can be created and
- used directly. The :class:`.EnvironmentContext` object
+ used directly. The :class:`.EnvironmentContext` object
is *only* needed when you need to actually invoke the
``env.py`` module present in the migration environment.
-
+
"""
_migration_context = None
_default_opts = None
config = None
- """An instance of :class:`.Config` representing the
+ """An instance of :class:`.Config` representing the
configuration file contents as well as other variables
set programmatically within it."""
script = None
"""An instance of :class:`.ScriptDirectory` which provides
programmatic access to version files within the ``versions/``
directory.

"""
def __init__(self, config, script, **kw):
"""Construct a new :class:`.EnvironmentContext`.
-
+
:param config: a :class:`.Config` instance.
:param script: a :class:`.ScriptDirectory` instance.
:param \**kw: keyword options that will be ultimately
passed along to the :class:`.MigrationContext` when
:meth:`.EnvironmentContext.configure` is called.
-
+
"""
self.config = config
self.script = script
self.context_opts.update(self._default_opts)
def __enter__(self):
- """Establish a context which provides a
+ """Establish a context which provides a
:class:`.EnvironmentContext` object to
env.py scripts.
-
+
The :class:`.EnvironmentContext` will
be made available as ``from alembic import context``.
-
+
"""
alembic.context._install_proxy(self)
return self
alembic.op._remove_proxy()
def is_offline_mode(self):
- """Return True if the current migrations environment
+ """Return True if the current migrations environment
is running in "offline mode".
- This is ``True`` or ``False`` depending
+ This is ``True`` or ``False`` depending
on the the ``--sql`` flag passed.
- This function does not require that the :class:`.MigrationContext`
+ This function does not require that the :class:`.MigrationContext`
has been configured.
"""
"""Return True if the context is configured to expect a
transactional DDL capable backend.
This defaults to the type of database in use, and
can be overridden by the ``transactional_ddl`` argument
to :meth:`.configure`.

This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
def get_head_revision(self):
"""Return the hex identifier of the 'head' revision.
- This function does not require that the :class:`.MigrationContext`
+ This function does not require that the :class:`.MigrationContext`
has been configured.
"""
Returns ``None`` if no value is available
or was configured.

This function does not require that the :class:`.MigrationContext`
has been configured.
"""
def get_revision_argument(self):
"""Get the 'destination' revision argument.
- This is typically the argument passed to the
+ This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
- If it was specified as ``head``, the actual
+ If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
- This function does not require that the :class:`.MigrationContext`
+ This function does not require that the :class:`.MigrationContext`
has been configured.
"""
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
- but is available for custom ``env.py`` configurations that
+ but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
- This function does not require that the :class:`.MigrationContext`
+ This function does not require that the :class:`.MigrationContext`
has been configured.
"""
**kw
):
"""Configure a :class:`.MigrationContext` within this
- :class:`.EnvironmentContext` which will provide database
- connectivity and other configuration to a series of
+ :class:`.EnvironmentContext` which will provide database
+ connectivity and other configuration to a series of
migration scripts.
-
- Many methods on :class:`.EnvironmentContext` require that
+
+ Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
-
+
The important thing needed by :meth:`.configure` is a
- means to determine what kind of database dialect is in use.
+ means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
- the :class:`.MigrationContext` is to be used in
+ the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
- ``connection`` parameter should be present as an
+ ``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.base.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
- multiple times for an invocation. The most recent
+ multiple times for an invocation. The most recent
:class:`~sqlalchemy.engine.base.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
General parameters:

:param connection: a :class:`~sqlalchemy.engine.base.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param starting_rev: Override the "starting revision" argument
when using ``--sql`` mode.
:param tag: a string tag for usage by custom ``env.py`` scripts.
Set via the ``--tag`` option, can be overridden here.
:param template_args: dictionary of template arguments which
will be added to the template argument environment when
:param version_table: The name of the Alembic version table.
The default is ``'alembic_version'``.
Parameters specific to the autogenerate feature, when
``alembic revision`` is run with the ``--autogenerate`` feature:

:param target_metadata: a :class:`sqlalchemy.schema.MetaData`
object that
will be consulted during autogeneration. The tables present
will be compared against
what is locally available on the target
:class:`~sqlalchemy.engine.base.Connection`
to produce candidate upgrade/downgrade operations.

:param compare_type: Indicates type comparison behavior during
an autogenerate
operation. Defaults to ``False`` which disables type
comparison. Set to
``True`` to turn on default type comparison, which has varied
accuracy depending on backend.

To customize type comparison behavior, a callable may be
specified which
can filter type comparisons during an autogenerate operation.
The format of this callable is::

def my_compare_type(context, inspected_column,
metadata_column, inspected_type, metadata_type):
# return True if the types are different,
# False if not, or None to allow the default implementation
# to compare these types
pass

``inspected_column`` is a dictionary structure as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.

A return value of ``None`` indicates to allow default type
comparison to proceed.
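As an illustrative sketch, such a callable would then be wired in
through :meth:`.configure` alongside the other autogenerate options::

context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=my_compare_type
)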
:param compare_server_default: Indicates server default comparison
behavior during
an autogenerate operation. Defaults to ``False`` which disables
server default
comparison. Set to ``True`` to turn on server default comparison,
which has
varied accuracy depending on backend.

To customize server default comparison behavior, a callable may
be specified
which can filter server default comparisons during an
autogenerate operation. The format of this
callable is::

def my_compare_server_default(context, inspected_column,
metadata_column, inspected_default, metadata_default,
rendered_metadata_default):
# return True if the defaults are different,
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default server default
comparison
to proceed. Note that some backends such as Postgresql actually
execute
the two defaults on the database side to compare for equivalence.
:param upgrade_token: When autogenerate completes, the text of the
candidate upgrade operations will be present in this template
variable when ``script.py.mako`` is rendered. Defaults to
``upgrades``.
:param downgrade_token: When autogenerate completes, the text of the
candidate downgrade operations will be present in this
template variable when ``script.py.mako`` is rendered. Defaults to
``downgrades``.
:param alembic_module_prefix: When autogenerate refers to Alembic
:mod:`alembic.operations` constructs, this prefix will be used
(i.e. ``op.create_table``) Defaults to "``op.``".
Can be ``None`` to indicate no prefix.

:param sqlalchemy_module_prefix: When autogenerate refers to
SQLAlchemy
:class:`~sqlalchemy.schema.Column` or type classes, this prefix
will be used
(i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``".
Can be ``None`` to indicate no prefix.

Note that when dialect-specific types are rendered, autogenerate
will render them using the dialect module name, i.e. ``mssql.BIT()``,
``postgresql.UUID()``.

Parameters specific to individual backends:

:param mssql_batch_separator: The "batch separator" which will
be placed between each statement when generating offline SQL Server
migrations. Defaults to ``GO``. Note this is in addition to the
customary semicolon ``;`` at the end of each statement; SQL Server
considers the "batch separator" to denote the end of an
individual statement execution, and cannot group certain
dependent operations in one step.
:param oracle_batch_separator: The "batch separator" which will
be placed between each statement when generating offline
Oracle migrations. Defaults to ``/``. Oracle doesn't add a
semicolon between statements like most other backends.
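As an illustrative sketch combining several of the options above for an
offline SQL Server run (the URL shown is hypothetical)::

context.configure(
url="mssql+pyodbc://scott:tiger@mydsn",
transactional_ddl=True,
mssql_batch_separator="GO"
)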
)
def run_migrations(self, **kw):
- """Run migrations as determined by the current command line
+ """Run migrations as determined by the current command line
configuration
- as well as versioning information present (or not) in the current
+ as well as versioning information present (or not) in the current
database connection (if one is present).
The function accepts optional ``**kw`` arguments. If these are
- passed, they are sent directly to the ``upgrade()`` and
+ passed, they are sent directly to the ``upgrade()`` and
``downgrade()``
functions within each target revision file. By modifying the
``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
database in use, can be passed from a custom ``env.py`` script
to the migration functions.
- This function requires that a :class:`.MigrationContext` has
+ This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
function's documentation for full detail including
caveats and limitations.

This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
self.get_context().execute(sql,
execution_options=execution_options)
def static_output(self, text):
"""Emit text directly to the "offline" SQL stream.
-
- Typically this is for emitting comments that
+
+ Typically this is for emitting comments that
start with --. The statement is not treated
as a SQL execution, no ; or batch separator
is added, etc.
-
+
"""
self.get_context().impl.static_output(text)
def begin_transaction(self):
- """Return a context manager that will
+ """Return a context manager that will
enclose an operation within a "transaction",
as defined by the environment's offline
and transactional DDL settings.
e.g.::
-
+
with context.begin_transaction():
context.run_migrations()
-
+
:meth:`.begin_transaction` is intended to
- "do the right thing" regardless of
+ "do the right thing" regardless of
calling context:
-
+
* If :meth:`.is_transactional_ddl` is ``False``,
returns a "do nothing" context manager
which otherwise produces no transactional
transaction and is itself a context manager,
which will roll back if an exception
is raised.
-
- Note that a custom ``env.py`` script which
+
+ Note that a custom ``env.py`` script which
has more specific transactional needs can of course
manipulate the :class:`~sqlalchemy.engine.base.Connection`
directly to produce transactional state in "online"
def get_context(self):
"""Return the current :class:`.MigrationContext` object.
- If :meth:`.EnvironmentContext.configure` has not been
+ If :meth:`.EnvironmentContext.configure` has not been
called yet, raises an exception.
"""
def get_bind(self):
"""Return the current 'bind'.
- In "online" mode, this is the
+ In "online" mode, this is the
:class:`sqlalchemy.engine.base.Connection` currently being used
to emit SQL to the database.
- This function requires that a :class:`.MigrationContext`
+ This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
log = logging.getLogger(__name__)
class MigrationContext(object):
- """Represent the database state made available to a migration
+ """Represent the database state made available to a migration
script.
:class:`.MigrationContext` is the front end to an actual
database connection, or alternatively a string output
stream given a particular database dialect,
from an Alembic perspective.
-
- When inside the ``env.py`` script, the :class:`.MigrationContext`
- is available via the
+
+ When inside the ``env.py`` script, the :class:`.MigrationContext`
+ is available via the
:meth:`.EnvironmentContext.get_context` method,
which is available at ``alembic.context``::
-
+
# from within env.py script
from alembic import context
migration_context = context.get_context()
-
+
For usage outside of an ``env.py`` script, such as for
utility routines that want to check the current version
- in the database, the :meth:`.MigrationContext.configure`
+ in the database, the :meth:`.MigrationContext.configure`
method to create new :class:`.MigrationContext` objects.
- For example, to get at the current revision in the
+ For example, to get at the current revision in the
database using :meth:`.MigrationContext.get_current_revision`::
-
+
# in any application, outside of an env.py script
from alembic.migration import MigrationContext
from sqlalchemy import create_engine
-
+
engine = create_engine("postgresql://mydatabase")
conn = engine.connect()
-
+
context = MigrationContext.configure(conn)
current_rev = context.get_current_revision()
-
+
The above context can also be used to produce
Alembic migration operations with an :class:`.Operations`
instance::
self._user_compare_type = opts.get('compare_type', False)
self._user_compare_server_default = opts.get(
'compare_server_default',
False)
version_table = opts.get('version_table', 'alembic_version')
log.info("Context impl %s.", self.impl.__class__.__name__)
if self.as_sql:
log.info("Generating static SQL")
- log.info("Will assume %s DDL.",
- "transactional" if self.impl.transactional_ddl
+ log.info("Will assume %s DDL.",
+ "transactional" if self.impl.transactional_ddl
else "non-transactional")
@classmethod
opts={},
):
"""Create a new :class:`.MigrationContext`.
-
+
This is a factory method usually called
by :meth:`.EnvironmentContext.configure`.
-
- :param connection: a :class:`~sqlalchemy.engine.base.Connection`
- to use for SQL execution in "online" mode. When present,
+
+ :param connection: a :class:`~sqlalchemy.engine.base.Connection`
+ to use for SQL execution in "online" mode. When present,
is also used to determine the type of dialect in use.
- :param url: a string database url, or a
+ :param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
- The type of dialect to be used will be derived from this if
+ The type of dialect to be used will be derived from this if
``connection`` is not passed.
- :param dialect_name: string name of a dialect, such as
- "postgresql", "mssql", etc. The type of dialect to be used will be
+ :param dialect_name: string name of a dialect, such as
+ "postgresql", "mssql", etc. The type of dialect to be used will be
derived from this if ``connection`` and ``url`` are not passed.
:param opts: dictionary of options. Most other options
- accepted by :meth:`.EnvironmentContext.configure` are passed via
+ accepted by :meth:`.EnvironmentContext.configure` are passed via
this dictionary.
"""
def get_current_revision(self):
"""Return the current revision, usually that which is present
in the ``alembic_version`` table in the database.
-
+
If this :class:`.MigrationContext` was configured in "offline"
- mode, that is with ``as_sql=True``, the ``starting_rev``
+ mode, that is with ``as_sql=True``, the ``starting_rev``
parameter is returned instead, if any.
-
+
"""
if self.as_sql:
return self._start_from_rev
)
def run_migrations(self, **kw):
- """Run the migration scripts established for this :class:`.MigrationContext`,
+ """Run the migration scripts established for this :class:`.MigrationContext`,
if any.
-
+
The commands in :mod:`alembic.command` will set up a function
that is ultimately passed to the :class:`.MigrationContext`
- as the ``fn`` argument. This function represents the "work"
+ as the ``fn`` argument. This function represents the "work"
that will be done when :meth:`.MigrationContext.run_migrations`
is called, typically from within the ``env.py`` script of the
migration environment. The "work function" then provides an iterable
- of version callables and other version information which
+ of version callables and other version information which
in the case of the ``upgrade`` or ``downgrade`` commands are the
list of version scripts to invoke. Other commands yield nothing,
in the case that a command wants to run some other operation
against the database such as the ``current`` or ``stamp`` commands.
-
- :param \**kw: keyword arguments here will be passed to each
+
+ :param \**kw: keyword arguments here will be passed to each
migration callable, that is the ``upgrade()`` or ``downgrade()``
method within revision scripts.
-
+
"""
current_rev = rev = False
self.impl.start_migrations()
def execute(self, sql):
"""Execute a SQL construct or string statement.
-
+
The underlying execution mechanics are used, that is
- if this is "offline mode" the SQL is written to the
+ if this is "offline mode" the SQL is written to the
output buffer, otherwise the SQL is emitted on
the current SQLAlchemy connection.
def dump(construct, *multiparams, **params):
self.impl._exec(construct)
return create_engine("%s://" % self.dialect.name,
strategy="mock", executor=dump)
@property
In online mode, this is an instance of
:class:`sqlalchemy.engine.base.Connection`, and is suitable
for ad-hoc execution of any kind of usage described
in :ref:`sqlexpression_toplevel` as well as
for usage with the :meth:`sqlalchemy.schema.Table.create`
and :meth:`sqlalchemy.schema.MetaData.create_all` methods
of :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.MetaData`.

Note that when "standard output" mode is enabled,
this bind will be a "mock" connection handler that cannot
return results and is only appropriate for a very limited
subset of commands.
return user_value
return self.impl.compare_type(
inspector_column,
metadata_column)
def _compare_server_default(self, inspector_column,
metadata_column,
rendered_metadata_default):
if self._user_compare_server_default is False:
return user_value
return self.impl.compare_server_default(
inspector_column,
metadata_column,
rendered_metadata_default)
from alembic.operations import Operations
from alembic import util
# create proxy functions for
# each method on the Operations class.
util.create_module_class_proxy(Operations, globals(), locals())
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.

Calls to context.execute() here emit the given string to the
script output.

"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
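# The offline template typically continues by running the migrations
# inside the offline "transaction" wrapper (a sketch consistent with the
# begin_transaction() documentation above):
with context.begin_transaction():
    context.run_migrations()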
In this scenario we need to create an Engine
and associate a connection with the context.

"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
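# A sketch of how the online template typically finishes: run migrations
# inside a transaction and make sure the connection is released.
try:
    with context.begin_transaction():
        context.run_migrations()
finally:
    connection.close()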
import logging.config
logging.config.fileConfig(options.config_file)
# gather section names referring to different
# databases. These are named "engine1", "engine2"
# in the sample .ini file.
db_names = options.get_main_option('databases')
# add your model's MetaData objects here
# for 'autogenerate' support. These must be set
# up to hold just those tables targeting a
# particular database. table.tometadata() may be
# helpful here in case a "copy" of
# a MetaData is needed.
# from myapp import mymodel
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.

Calls to context.execute() here emit the given string to the
script output.

"""
# for the --sql use case, run migrations for each URL into
# individual files.
engines = {}
for name in re.split(r',\s*', db_names):
engines[name] = rec = {}
rec['url'] = context.config.get_section_option(name,
"sqlalchemy.url")
for name, rec in engines.items():
In this scenario we need to create an Engine
and associate a connection with the context.

"""
# for the direct-to-DB use case, start a transaction on all
"""Pylons bootstrap environment.
Place 'pylons_config_file' into alembic.ini, and the application will
be loaded from there.
"""
# specify here how the engine is acquired
# engine = meta.engine
raise NotImplementedError("Please specify engine connectivity here")

if isinstance(engine, Engine):
connection = engine.connect()
else:
raise Exception(
'Expected engine instance got %s instead' % type(engine)
)

context.configure(
connection=connection,
target_metadata=target_metadata
def create_module_class_proxy(cls, globals_, locals_):
"""Create module level proxy functions for the
methods on a given class.

The functions will have a signature
compatible with that of the methods. A proxy is established
using the ``_install_proxy(obj)`` function,
defaulted_vals = ()
apply_kw = inspect.formatargspec(
name_args, spec[1], spec[2],
defaulted_vals,
formatvalue=lambda x: '=' + x)
def coerce_resource_to_filename(fname):
"""Interpret a filename as either a filesystem location or as a package resource.
-
+
Names that are non absolute paths and contain a colon
are interpreted as resources and coerced to a file location.
-
+
"""
if not os.path.isabs(fname) and ":" in fname:
import pkg_resources
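        # presumed continuation (an assumption based on the docstring):
        # split "package:subpath" and resolve it via pkg_resources
        return pkg_resources.resource_filename(*fname.split(':'))
    return fname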
def pyc_file_from_path(path):
"""Given a python source path, locate the .pyc.
-
+
See http://www.python.org/dev/peps/pep-3147/
#detecting-pep-3147-availability
http://www.python.org/dev/peps/pep-3147/#file-extension-checks
-
+
"""
import imp
has3147 = hasattr(imp, 'get_tag')
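    # presumed continuation (an assumption): PEP 3147 interpreters provide
    # imp.get_tag(); derive the cached .pyc location from the source path,
    # otherwise fall back to the legacy side-by-side .pyc
    if has3147:
        return imp.cache_from_source(path)
    else:
        return path + "c"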
testing_config.read(['test.cfg'])
def sqlite_db():
# sqlite caches table pragma info
# per connection, so create a new
# engine for each assertion
dir_ = os.path.join(staging_directory, 'scripts')
self.assertion = []
self.dialect = dialect
self.as_sql = as_sql
# TODO: this might need to
# be more like a real connection
# as tests get more involved
self.connection = None
self.as_sql = as_sql
def assert_(self, *sql):
# TODO: make this more flexible about
# whitespace and such
eq_(self.impl.assertion, list(sql))
def test_bulk_insert_wrong_cols():
context = op_fixture('postgresql')
- t1 = table("ins_table",
+ t1 = table("ins_table",
column('id', Integer),
column('v1', String()),
column('v2', String()),
def test_bulk_insert_as_sql():
context = _test_bulk_insert('default', True)
context.assert_(
- "INSERT INTO ins_table (id, v1, v2) VALUES (1, 'row v1', 'row v5')",
- "INSERT INTO ins_table (id, v1, v2) VALUES (2, 'row v2', 'row v6')",
+ "INSERT INTO ins_table (id, v1, v2) VALUES (1, 'row v1', 'row v5')",
+ "INSERT INTO ins_table (id, v1, v2) VALUES (2, 'row v2', 'row v6')",
"INSERT INTO ins_table (id, v1, v2) VALUES (3, 'row v3', 'row v7')",
"INSERT INTO ins_table (id, v1, v2) VALUES (4, 'row v4', 'row v8')"
)
def test_bulk_insert_as_sql_pg():
context = _test_bulk_insert('postgresql', True)
context.assert_(
- "INSERT INTO ins_table (id, v1, v2) VALUES (1, 'row v1', 'row v5')",
- "INSERT INTO ins_table (id, v1, v2) VALUES (2, 'row v2', 'row v6')",
+ "INSERT INTO ins_table (id, v1, v2) VALUES (1, 'row v1', 'row v5')",
+ "INSERT INTO ins_table (id, v1, v2) VALUES (2, 'row v2', 'row v6')",
"INSERT INTO ins_table (id, v1, v2) VALUES (3, 'row v3', 'row v7')",
"INSERT INTO ins_table (id, v1, v2) VALUES (4, 'row v4', 'row v8')"
)
def test_bulk_insert_as_sql_mssql():
context = _test_bulk_insert('mssql', True)
# SQL server requires IDENTITY_INSERT
# TODO: figure out if this is safe to enable for a table that
# doesn't have an IDENTITY column
context.assert_(
'SET IDENTITY_INSERT ins_table ON',
"INSERT INTO ins_table (id, v1, v2) VALUES (1, 'row v1', 'row v5')",
"INSERT INTO ins_table (id, v1, v2) VALUES (2, 'row v2', 'row v6')",
"INSERT INTO ins_table (id, v1, v2) VALUES (3, 'row v3', 'row v7')",
"INSERT INTO ins_table (id, v1, v2) VALUES (4, 'row v4', 'row v8')",
'SET IDENTITY_INSERT ins_table OFF'
)
self.conn.close()
def test_single_insert_round_trip(self):
self.op.bulk_insert(self.t1,
[{'data':"d1", "x":"x1"}]
)
from sqlalchemy import Column
def upgrade():
op.create_table("sometable",
Column("data", ENUM("one", "two", "three", name="pgenum"))
)
def upgrade():
enum = ENUM("one", "two", "three", name="pgenum", create_type=False)
enum.create(op.get_bind(), checkfirst=False)
op.create_table("sometable",
Column("data", enum)
)
)
""")
cls.bind.execute("""
insert into tab (col) values
('old data 1'),
('old data 2.1'),
('old data 3')
ctx = self.autogen_context['context']
return ctx.impl.compare_server_default(
cols[0],
col,
rendered)
def test_compare_current_timestamp(self):