From 2c69cdb3502017d4b3a98d3c32466a95063939f3 Mon Sep 17 00:00:00 2001
From: Mike Bayer
Date: Sat, 22 Nov 2008 19:22:42 +0000
Subject: [PATCH] - Tickets [ticket:1200].
- Added note about create_session() defaults.
- Added section about metadata.reflect().
- Updated `TypeDecorator` section.
- Rewrote the "threadlocal" strategy section of the docs due to recent
  confusion over this feature.
- Ordered the init arguments in the docs for sessionmaker().
- Other edits.
---
 CHANGES                        |  13 +++
 doc/build/content/dbengine.txt | 146 +++++++++++----------------------
 doc/build/content/metadata.txt |  18 ++++
 doc/build/content/session.txt  |   2 +
 doc/build/content/types.txt    |  32 ++++----
 lib/sqlalchemy/orm/session.py  |  32 ++++----
 lib/sqlalchemy/schema.py       |  21 +++--
 7 files changed, 132 insertions(+), 132 deletions(-)

diff --git a/CHANGES b/CHANGES
index c2b437d5d1..b3a05ebe9d 100644
--- a/CHANGES
+++ b/CHANGES
@@ -30,6 +30,19 @@ CHANGES
     - the "passive" flag on session.is_modified() is
       correctly propagated to the attribute manager.
+
+- documentation
+    - Tickets [ticket:1200].
+
+    - Added note about create_session() defaults.
+
+    - Added section about metadata.reflect().
+
+    - Updated `TypeDecorator` section.
+
+    - Rewrote the "threadlocal" strategy section of
+      the docs due to recent confusion over this
+      feature.
 
 - postgres
     - Calling alias.execute() in conjunction with
diff --git a/doc/build/content/dbengine.txt b/doc/build/content/dbengine.txt
index 9544d06893..dd18aee1a2 100644
--- a/doc/build/content/dbengine.txt
+++ b/doc/build/content/dbengine.txt
@@ -147,10 +147,7 @@ A list of all standard options, as well as several that are used by particular d
 * **pool_size=5** - the number of connections to keep open inside the connection pool. This is used with `QueuePool` as well as `SingletonThreadPool`.
 * **pool_recycle=-1** - this setting causes the pool to recycle connections after the given number of seconds has passed. It defaults to -1, or no timeout. For example, setting to 3600 means connections will be recycled after one hour. Note that MySQL in particular will **disconnect automatically** if no activity is detected on a connection for eight hours (although this is configurable with the MySQLdb connection itself and the server configuration as well).
 * **pool_timeout=30** - number of seconds to wait before giving up on getting a connection from the pool. This is only used with `QueuePool`.
-* **strategy='plain'** - the Strategy argument is used to select alternate implementations of the underlying Engine object, which coordinates operations between dialects, compilers, connections, and so on. Currently, the only alternate strategy besides the default value of "plain" is the "threadlocal" strategy, which selects the usage of the `TLEngine` class that provides a modified connection scope for connectionless executions. Connectionless execution as well as further detail on this setting are described in [dbengine_implicit](rel:dbengine_implicit).
-* **threaded=True** - used by cx_Oracle; sets the `threaded` parameter of the connection indicating thread-safe usage. cx_Oracle docs indicate setting this flag to `False` will speed performance by 10-15%. While this defaults to `False` in cx_Oracle, SQLAlchemy defaults it to `True`, preferring stability over early optimization.
-* **use_ansi=True** - used only by Oracle; when False, the Oracle driver attempts to support a particular "quirk" of Oracle versions 8 and previous, that the LEFT OUTER JOIN SQL syntax is not supported, and the "Oracle join" syntax of using `column1(+)=column2` must be used in order to achieve a LEFT OUTER JOIN.
-* **use_oids=False** - used only by Postgres, will enable the column name "oid" as the object ID column, which is also used for the default sort order of tables. Postgres as of 8.1 has object IDs disabled by default.
+* **strategy='plain'** - used to invoke alternate `Engine` implementations. Currently available is the `threadlocal` strategy, which is described in [dbengine_implicit_strategies](rel:dbengine_implicit_strategies).
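+
+For example, several of the pool options above can be combined in a single `create_engine()` call; a brief sketch, where the URL and the particular values are illustrative only:
+
+    {python}
+    db = create_engine('mysql://localhost/test',
+                       pool_size=10, pool_recycle=3600, pool_timeout=30)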
 
 ### More On Connections {@name=connections}
 
@@ -294,45 +291,28 @@ In both "connectionless" examples, the `Connection` is created behind the scenes
 
 #### Using the Threadlocal Execution Strategy {@name=strategies}
 
-With connectionless execution, each returned `ResultProxy` object references its own distinct DBAPI connection object. This means that multiple executions will result in multiple DBAPI connections being used at the same time; the example below illustrates this:
+The "threadlocal" engine strategy is used by non-ORM applications which wish to bind a transaction to the current thread, such that all parts of the application can participate in that transaction implicitly without the need to explicitly reference a `Connection`. "threadlocal" is designed for one very specific pattern of use, described below, and is not appropriate unless that pattern is what's desired. It has **no impact** on the "thread safety" of SQLAlchemy components or of one's application. It should also not be used with an ORM `Session` object, as the `Session` itself represents an ongoing transaction and already handles the job of maintaining connection and transactional resources.
 
-    {python}
-    db = create_engine('mysql://localhost/test')
-
-    # execute one statement and receive results.  r1 now references a DBAPI connection resource.
-    r1 = db.execute("select * from table1")
-
-    # execute a second statement and receive results.  r2 now references a *second* DBAPI connection resource.
-    r2 = db.execute("select * from table2")
-    for row in r1:
-        ...
-    for row in r2:
-        ...
-    # release connection 1
-    r1.close()
-
-    # release connection 2
-    r2.close()
-
-Where above, we have two result sets in scope at the same time, therefore we have two distinct DBAPI connections, both separately checked out from the connection pool, in scope at the same time.
-
-An option exists to `create_engine()` called `strategy="threadlocal"`, which changes this behavior. When this option is used, the `Engine` which is returned by `create_engine()` is a special subclass of engine called `TLEngine`. This engine, when it creates the `Connection` used by a connectionless execution, checks a **threadlocal variable** for an existing DBAPI connection that was already checked out from the pool, within the current thread. If one exists, it uses that one.
+Enabling `threadlocal` is achieved as follows:
 
-The usage of "threadlocal" modifies the underlying behavior of our example above, as follows:
-
-    {python title="Threadlocal Strategy"}
+    {python}
     db = create_engine('mysql://localhost/test', strategy='threadlocal')
+
+When the engine above is used in a "connectionless" style, meaning `engine.execute()` is called, a DBAPI connection is retrieved from the connection pool and then associated with the current thread. Subsequent operations on the `Engine` while the DBAPI connection remains checked out will make use of the *same* DBAPI connection object. The connection stays allocated until all returned `ResultProxy` objects are closed, which occurs for a particular `ResultProxy` after all pending results are fetched, or immediately for an operation which returns no rows (such as an INSERT).
 
+    {python}
     # execute one statement and receive results. r1 now references a DBAPI connection resource.
     r1 = db.execute("select * from table1")
 
     # execute a second statement and receive results. r2 now references the *same* resource as r1
     r2 = db.execute("select * from table2")
 
-    for row in r1:
-        ...
-    for row in r2:
-        ...
+    # fetch a row on r1 (assume more results are pending)
+    row1 = r1.fetchone()
+
+    # fetch a row on r2 (same)
+    row2 = r2.fetchone()
+
     # close r1. the connection is still held by r2.
     r1.close()
 
@@ -340,85 +320,59 @@ The usage of "threadlocal" modifies the underlying behavior of our example above
     # are returned to the pool.
     r2.close()
 
-Where above, we again have two result sets in scope at the same time, but because they are present in the same thread, there is only **one DBAPI connection in use**.
+The above example does not illustrate any pattern that is particularly useful, as it is not a frequent occurrence that two execute/result fetching operations "leapfrog" one another. There is a slight savings of connection pool checkout overhead between the two operations, and an implicit sharing of the same transactional context, but since there is no explicitly declared transaction, this association is short-lived.
 
-While the above distinction may not seem like much, it has several potentially desirable effects. One is that you can in some cases reduce the number of concurrent connections checked out from the connection pool, in the case that a `ResultProxy` is still opened and a second statement is issued. A second advantage is that by limiting the number of checked out connections in a thread to just one, you eliminate the issue of deadlocks within a single thread, such as when connection A locks a table, and connection B attempts to read from the same table in the same thread, it will "deadlock" on waiting for connection A to release its lock; the `threadlocal` strategy eliminates this possibility.
+The real usage of "threadlocal" comes when we want several operations to occur within the scope of a shared transaction. The `Engine` now has `begin()`, `commit()` and `rollback()` methods which will retrieve a connection resource from the pool and establish a new transaction, maintaining the connection against the current thread until the transaction is committed or rolled back:
 
-A third advantage to the `threadlocal` strategy is that it allows the `Transaction` object to be used in combination with connectionless execution. Recall from the section on transactions, that the `Transaction` is returned by the `begin()` method on a `Connection`; all statements which wish to participate in this transaction must be executed by the same `Connection`, thereby forcing the usage of an explicit connection. However, the `TLEngine` provides a `Transaction` that is local to the current thread; using it, one can issue many "connectionless" statements within a thread and they will all automatically partake in the current transaction, as in the example below:
-
-    {python title="threadlocal connection sharing"}
-    # get a TLEngine
-    engine = create_engine('mysql://localhost/test', strategy='threadlocal')
-
-    engine.begin()
+    {python}
+    db.begin()
     try:
-        engine.execute("insert into users values (?, ?)", 1, "john")
-        users.update(users.c.user_id==5).execute(name='ed')
-        engine.commit()
+        call_operation1()
+        call_operation2()
+        db.commit()
     except:
-        engine.rollback()
-
-Notice that no `Connection` needed to be used; the `begin()` method on `TLEngine` (which note is not available on the regular `Engine`) created a `Transaction` as well as a `Connection`, and held onto both in a context corresponding to the current thread. Each `execute()` call made use of the same connection, allowing them all to participate in the same transaction.
-
-Complex application flows can take advantage of the "threadlocal" strategy in order to allow many disparate parts of an application to take place in the same transaction automatically. The example below demonstrates several forms of "connectionless execution" as well as some specialized explicit ones:
-
-    {python title="threadlocal connection sharing"}
-    engine = create_engine('mysql://localhost/test', strategy='threadlocal')
+        db.rollback()
+
+`call_operation1()` and `call_operation2()` can make use of the `Engine` (the `db` variable above) as a global variable, using the "connectionless" execution style, and their operations will participate in the same transaction:
 
-    def dosomethingimplicit():
-        table1.execute("some sql")
-        table1.execute("some other sql")
+    {python}
+    def call_operation1():
+        db.execute("insert into users values (?, ?)", 1, "john")
+
+    def call_operation2():
+        users.update(users.c.user_id==5).execute(name='ed')
 
-    def dosomethingelse():
-        table2.execute("some sql")
-        conn = engine.contextual_connect()
-        # do stuff with conn
-        conn.execute("some other sql")
-        conn.close()
+When using threadlocal, operations which call upon the `engine.connect()` method will receive a `Connection` that is **outside** the scope of the transaction. This can be used for operations such as logging the status of an operation regardless of transaction success:
 
-    def dosomethingtransactional():
-        conn = engine.contextual_connect()
-        trans = conn.begin()
-        # do stuff
-        trans.commit()
-
-    engine.begin()
+    {python}
+    db.begin()
+    conn = db.connect()
     try:
-        dosomethingimplicit()
-        dosomethingelse()
-        dosomethingtransactional()
-        engine.commit()
+        conn.execute(log_table.insert(), message="Operation started")
+        call_operation1()
+        call_operation2()
+        db.commit()
+        conn.execute(log_table.insert(), message="Operation succeeded")
     except:
-        engine.rollback()
-
-In the above example, the program calls three functions `dosomethingimplicit()`, `dosomethingelse()` and `dosomethingtransactional()`. All three functions use either connectionless execution, or a special function `contextual_connect()` which we will describe in a moment. These two styles of execution both indicate that all executions will use the same connection object. Additionally, the method `dosomethingtransactional()` begins and commits its own `Transaction`. But only one transaction is used, too; it's controlled completely by the `engine.begin()`/`engine.commit()` calls at the bottom. Recall that `Transaction` supports "nesting" behavior, whereby transactions begun on a `Connection` which already has a transaction open, will "nest" into the enclosing transaction. Since the transaction opened in `dosomethingtransactional()` occurs using the same connection which already has a transaction begun, it "nests" into that transaction and therefore has no effect on the actual transaction scope (unless it calls `rollback()`).
-
-Some of the functions in the above example make use of a method called `engine.contextual_connect()`. This method is available on both `Engine` as well as `TLEngine`, and returns the `Connection` that applies to the current **connection context**. When using the `TLEngine`, this is just another term for the "thread local connection" that is being used for all connectionless executions. When using just the regular `Engine` (i.e. the "default" strategy), `contextual_connect()` is synonymous with `connect()`. Below we illustrate that two connections opened via `contextual_connect()` at the same time, both reference the same underlying DBAPI connection:
-
-    {python title="Contextual Connection"}
-    # threadlocal strategy
-    db = create_engine('mysql://localhost/test', strategy='threadlocal')
-
-    conn1 = db.contextual_connect()
-    conn2 = db.contextual_connect()
-
-    >>> conn1.connection is conn2.connection
-    True
+        db.rollback()
+        conn.execute(log_table.insert(), message="Operation failed")
+    finally:
+        conn.close()
 
-The basic idea of `contextual_connect()` is that it's the "connection used by connectionless execution". It's different from the `connect()` method in that `connect()` is always used when handling an explicit `Connection`, which will always reference distinct DBAPI connection. Using `connect()` in combination with `TLEngine` allows one to "circumvent" the current thread local context, as in this example where a single statement issues data to the database externally to the current transaction:
+Functions which are written to use an explicit `Connection` object, but wish to participate in the threadlocal transaction, can receive their `Connection` object from the `contextual_connect()` method, which returns a `Connection` that is **inside** the scope of the transaction:
 
     {python}
-    engine.begin()
-    engine.execute("insert into users values (?, ?)", 1, "john")
-    connection = engine.connect()
-    connection.execute(users.update(users.c.user_id==5).execute(name='ed'))
-    engine.rollback()
+    conn = db.contextual_connect()
+    call_operation3(conn)
+    conn.close()
+
+Calling `close()` on the "contextual" connection does not release the connection resources to the pool if other resources are making use of it. A resource-counting mechanism is employed so that the connection is released back to the pool only when all users of that connection, including the transaction established by `db.begin()`, have completed.
 
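+To illustrate the thread-local scoping, note that two `Connection` objects opened via `contextual_connect()` within the same thread reference the same underlying DBAPI connection; a quick interactive check, reusing the `db` engine from above:
+
+    {python}
+    conn1 = db.contextual_connect()
+    conn2 = db.contextual_connect()
+
+    >>> conn1.connection is conn2.connection
+    True
+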
-In the above example, a thread-local transaction is begun, but is later rolled back. The statement `insert into users values (?, ?)` is executed without using a connection, therefore uses the thread-local transaction. So its data is rolled back when the transaction is rolled back. However, the `users.update()` statement is executed using a distinct `Connection` returned by the `engine.connect()` method, so it therefore is not part of the threadlocal transaction; it autocommits immediately.
+So remember: if you're not sure whether you need to use `strategy="threadlocal"`, the answer is **no**! It's driven by a specific programming pattern that is generally not the norm.
 
 ### Configuring Logging {@name=logging}
 
-As of the 0.3 series of SQLAlchemy, Python's standard [logging](http://www.python.org/doc/lib/module-logging.html) module is used to implement informational and debug log output. This allows SQLAlchemy's logging to integrate in a standard way with other applications and libraries. The `echo` and `echo_pool` flags that are present on `create_engine()`, as well as the `echo_uow` flag used on `Session`, all interact with regular loggers.
+Python's standard [logging](http://www.python.org/doc/lib/module-logging.html) module is used to implement informational and debug log output in SQLAlchemy. This allows SQLAlchemy's logging to integrate in a standard way with other applications and libraries. The `echo` and `echo_pool` flags that are present on `create_engine()`, as well as the `echo_uow` flag used on `Session`, all interact with regular loggers.
 
 This section assumes familiarity with the above linked logging module. All logging performed by SQLAlchemy exists underneath the `sqlalchemy` namespace, as used by `logging.getLogger('sqlalchemy')`. When logging has been configured (i.e. such as via `logging.basicConfig()`), the general namespace of SA loggers that can be turned on is as follows:
diff --git a/doc/build/content/metadata.txt b/doc/build/content/metadata.txt
index 8a8b4d5f89..bc92baf75e 100644
--- a/doc/build/content/metadata.txt
+++ b/doc/build/content/metadata.txt
@@ -186,7 +186,25 @@ Individual columns can be overridden with explicit values when reflecting tables
     ...    Column('id', Integer, primary_key=True),   # override reflected 'id' to have primary key
     ...    Column('mydata', Unicode(50)),    # override reflected 'mydata' to be Unicode
     ...    autoload=True)
+
+##### Reflecting All Tables at Once {@name=reflectall}
+
+The `MetaData` object can also get a listing of tables and reflect the full set. This is achieved by using the `reflect()` method. After calling it, all located tables are present within the `MetaData`'s dictionary of tables:
+
+    {python}
+    meta = MetaData()
+    meta.reflect(bind=someengine)
+    users_table = meta.tables['users']
+    addresses_table = meta.tables['addresses']
+
+`metadata.reflect()` is also a handy way to clear out all the data in a database, by iterating through the reflected tables in reverse dependency order and emptying each one:
+
+    {python}
+    meta = MetaData()
+    meta.reflect(bind=someengine)
+    for table in reversed(meta.sorted_tables):
+        someengine.execute(table.delete())
+
 #### Specifying the Schema Name {@name=schema}
 
 Some databases support the concept of multiple schemas. A `Table` can reference this by specifying the `schema` keyword argument:
diff --git a/doc/build/content/session.txt b/doc/build/content/session.txt
index 0e077c2cae..41b167e78e 100644
--- a/doc/build/content/session.txt
+++ b/doc/build/content/session.txt
@@ -86,6 +86,8 @@ As an alternative to `sessionmaker()`, `create_session()` is a function which ca
     {python}
     session = create_session(bind=myengine, autocommit=True, autoflush=False)
 
+Note that `create_session()` disables all optional "automation" by default. Called with no arguments, the session produced is not autoflushing, does not auto-expire, and does not maintain a transaction (i.e. it begins and commits a new transaction for each `flush()`). SQLAlchemy uses `create_session()` extensively within its own unit tests.
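+
+In other words, a bare `create_session()` call behaves roughly like the fully spelled-out form below (a sketch based on the defaults just described):
+
+    {python}
+    # approximately equivalent to plain create_session()
+    session = create_session(autocommit=True, autoflush=False, expire_on_commit=False)
+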
 ### Configurational Arguments {@name=configuration}
 
 Configurational arguments accepted by `sessionmaker()` and `create_session()` are the same as that of the `Session` class itself, and are described at [docstrings_sqlalchemy.orm_modfunc_sessionmaker](rel:docstrings_sqlalchemy.orm_modfunc_sessionmaker).
diff --git a/doc/build/content/types.txt b/doc/build/content/types.txt
index 0329a46150..cee82a62cb 100644
--- a/doc/build/content/types.txt
+++ b/doc/build/content/types.txt
@@ -19,29 +19,31 @@ Following is a rundown of the standard types.
 
 #### String
 
-This type is the base type for all string and character types, such as `Unicode`, `TEXT`, `CLOB`, etc. By default it generates a VARCHAR in DDL. It includes an argument `length`, which indicates the length in characters of the type, as well as `convert_unicode` and `assert_unicode`, which are booleans. `length` will be used as the length argument when generating DDL. If `length` is omitted, the `String` type resolves into the `TEXT` type.
+This type is the base class for all string and character types. `String` includes a `length` parameter, which will be used as the "length" when generating DDL for types such as `CHAR` and `VARCHAR`. The base `String` type will usually generate `VARCHAR` in DDL. `length` has no other usage and can be omitted if DDL is not being generated.
 
-`convert_unicode=True` indicates that incoming strings, if they are Python `unicode` strings, will be encoded into a raw bytestring using the `encoding` attribute of the dialect (defaults to `utf-8`). Similarly, raw bytestrings coming back from the database will be decoded into `unicode` objects on the way back.
+#### Unicode
 
-`assert_unicode` is set to `None` by default. When `True`, it indicates that incoming bind parameters will be checked that they are in fact `unicode` objects, else an error is raised. A value of `'warn'` instead raises a warning. Setting it to `None` indicates that the dialect-level `convert_unicode` setting should take place, whereas setting it to `False` disables it unconditionally (this flag is new as of version 0.4.2).
+The `Unicode` type is a `String` which converts Python unicode objects (i.e., strings that are defined as `u'somevalue'`) into encoded bytestrings when passing the value to the database, and similarly decodes values from the database back into Python unicode objects. The encoding used is configured on the dialect using the `encoding` parameter, which defaults to `utf-8`.
 
-Both `convert_unicode` and `assert_unicode` may be set at the engine level as flags to `create_engine()`.
+When using the `Unicode` type, it is only appropriate to pass Python unicode objects, and not plain strings. If a bytestring is passed, a warning is issued. If you notice your application emitting these warnings but aren't sure where they originate, the Python `warnings` filter can be used to turn them into exceptions, which will reveal a stack trace:
 
-#### Unicode
+    {python}
+    import warnings
+    warnings.simplefilter('error')
 
-The `Unicode` type is shorthand for `String` with `convert_unicode=True` and `assert_unicode='warn'`. When writing a Unicode-aware application, it is strongly recommended that this type is used, and that only Unicode strings are used in the application. By "Unicode string" we mean a string with a u, i.e. `u'hello'`. Otherwise, particularly when using the ORM, data will be converted to Unicode when it returns from the database, but local data which was generated locally will not be in Unicode format, which can create confusion.
+Any `String` type or subtype can be turned into a `Unicode` type by passing the flags `convert_unicode=True, assert_unicode='warn'` to the constructor. The `Unicode` type itself is shorthand for this notation. The `create_engine()` call also accepts these flags, which, when passed, establish these settings as the default for all `String` types.
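+
+For instance, the two column definitions below behave identically with respect to unicode conversion (a sketch; the column names are arbitrary):
+
+    {python}
+    Column('data', String(50, convert_unicode=True, assert_unicode='warn'))
+    Column('data', Unicode(50))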
 
 #### Text / UnicodeText
 
-These are the "unbounded" versions of ``String`` and ``Unicode``. They have no "length" parameter, and generate a column type of TEXT or CLOB.
+The `Text` and `UnicodeText` types are the same as `String` and `Unicode`, except that they do not take a length parameter and they generate DDL of `TEXT` or `CLOB` instead of `VARCHAR`.
 
 #### Numeric
 
-Numeric types return `decimal.Decimal` objects by default. The flag `asdecimal=False` may be specified which enables the type to pass data straight through. Numeric also takes "precision" and "scale" arguments which are used when CREATE TABLE is issued.
+Numeric types return `decimal.Decimal` objects by default. The flag `asdecimal=False` may be specified, which causes values to be passed straight through using the DBAPI's preferred return type, which may be either `Decimal` or `float`. Numeric also takes "precision" and "scale" arguments which are used when generating DDL.
 
 #### Float
 
-Float types return Python floats. Float also takes a "precision" argument which is used when CREATE TABLE is issued.
+Float types return Python floats. Float also takes a "precision" argument which is used when generating DDL.
 
 #### DateTime/Date/Time
 
@@ -57,7 +59,7 @@ The Binary type generates BLOB or BYTEA when tables are created, and also conver
 
 #### Boolean
 
-Boolean typically uses BOOLEAN or SMALLINT on the CREATE TABLE side, and returns Python `True` or `False`.
+Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on the Python side deals in `True` or `False`.
 
 #### PickleType
 
@@ -81,7 +83,7 @@ These are subclasses of the generic types and include:
     class BLOB(Binary)
     class BOOLEAN(Boolean)
 
-The idea behind the SQL-specific types is that a CREATE TABLE statement would generate the exact type specified.
+The idea behind the SQL-specific types is that generated DDL (i.e. a CREATE TABLE statement) will render the exact type specified, in all cases. This also implies that some of these types may not be supported by all dialects.
 
 ### Dialect Specific Types {@name=dialect}
 
@@ -102,13 +104,13 @@ Or some PostgreSQL types:
 
     table = Table('foo', meta,
         Column('ipaddress', PGInet),
-        Column('elements', PGArray(str))    # PGArray is available in 0.4, and takes a type argument
+        Column('elements', PGArray(str))
     )
 
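+The PostgreSQL-specific names used above can be imported from the corresponding module of the `sqlalchemy.databases` package (a brief sketch, following the package layout referenced in the next section):
+
+    {python}
+    from sqlalchemy.databases.postgres import PGInet, PGArray
+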
 ### Creating your Own Types {@name=custom}
 
-User-defined types can be created which can augment the bind parameter and result processing capabilities of the built in types. This is usually achieved using the `TypeDecorator` class, which "decorates" the behavior of any existing type. As of version 0.4.2, the new `process_bind_param()` and `process_result_value()` methods should be used:
+User-defined types can be created which can augment the bind parameter and result processing capabilities of the built-in types. This is usually achieved using the `TypeDecorator` class, which "decorates" the behavior of any existing type.
 
     {python}
     import sqlalchemy.types as types
@@ -128,7 +130,7 @@ User-defined types can be created which can augment the bind parameter and resul
         def copy(self):
             return MyType(self.impl.length)
 
-`TypeDecorator` should generally be used for any user-defined type which redefines the behavior of another type, including other `TypeDecorator` subclasses such as `PickleType`, and the new `process_...()` methods described above should be used.
+The reason that type behavior is modified using class decoration instead of subclassing is due to the way dialect-specific types are used. In the example above, for instance, when using the mysql dialect the actual type in use will be a `sqlalchemy.databases.mysql.MSString` instance. `TypeDecorator` handles the mechanics of passing values between the user-defined `process_` methods and the dialect-specific type currently in use.
 
 To build a type object from scratch, which will not have a corresponding database-specific implementation, subclass `TypeEngine`:
 
@@ -152,7 +154,7 @@ To build a type object from scratch, which will not have a corresponding databas
             return value
         return process
 
-The `bind_processor` and `result_processor` methods return a callable which will be used to process data at the bind parameter and result row level. If either processing is not necessary, the method can return `None` (this reduces the workload as well).
+The `bind_processor` and `result_processor` methods return a callable which will be used to process data at the bind parameter and result row level. If processing is not necessary, the method should return `None`.
 
 Once you make your type, it's immediately usable:
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index 15e751cca9..a0f2f1f521 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -83,18 +83,6 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
       by any of these methods, the ``Session`` is ready for the next usage,
       which will again acquire and maintain a new connection/transaction.
 
-    expire_on_commit
-      Defaults to ``True``. When ``True``, all instances will be fully expired after
-      each ``commit()``, so that all attribute/object access subsequent to a completed
-      transaction will load from the most recent database state.
-
-    _enable_transaction_accounting
-      Defaults to ``True``.  A legacy-only flag which when ``False``
-      disables *all* 0.5-style object accounting on transaction boundaries,
-      including auto-expiry of instances on rollback and commit, maintenance of
-      the "new" and "deleted" lists upon rollback, and autoflush
-      of pending changes upon begin(), all of which are interdependent.
-
     autoflush
       When ``True``, all query operations will issue a ``flush()`` call to
       this ``Session`` before proceeding. This is a convenience feature so
@@ -138,6 +126,18 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
       Deprecated.  Use
       ``logging.getLogger('sqlalchemy.orm.unitofwork').setLevel(logging.DEBUG)``.
 
+    _enable_transaction_accounting
+      Defaults to ``True``.  A legacy-only flag which when ``False``
+      disables *all* 0.5-style object accounting on transaction boundaries,
+      including auto-expiry of instances on rollback and commit, maintenance of
+      the "new" and "deleted" lists upon rollback, and autoflush
+      of pending changes upon begin(), all of which are interdependent.
+
+    expire_on_commit
+      Defaults to ``True``. When ``True``, all instances will be fully expired after
+      each ``commit()``, so that all attribute/object access subsequent to a completed
+      transaction will load from the most recent database state.
+
     extension
       An optional [sqlalchemy.orm.session#SessionExtension] instance, or a list
       of such instances, which
@@ -145,6 +145,10 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
       post-rollback event.  User-defined code may be placed within these
       hooks using a user-defined subclass of ``SessionExtension``.
 
+    query_cls
+      Class which should be used to create new Query objects, as returned
+      by the ``query()`` method.  Defaults to [sqlalchemy.orm.query#Query].
+
     twophase
       When ``True``, all transactions will be started using
       [sqlalchemy.engine_TwoPhaseTransaction]. During a ``commit()``, after
@@ -153,10 +157,6 @@ def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
       called. This allows each database to roll back the entire
       transaction, before each transaction is committed.
 
-    query_cls
-      Class which should be used to create new Query objects, as returned
-      by the ``query()`` method.  Defaults to [sqlalchemy.orm.query#Query].
-
     weak_identity_map
       When set to the default value of ``False``, a weak-referencing map is
       used; instances which are not externally referenced will be garbage
diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py
index 74f3e6093f..aad1199060 100644
--- a/lib/sqlalchemy/schema.py
+++ b/lib/sqlalchemy/schema.py
@@ -1417,6 +1417,9 @@ class MetaData(SchemaItem):
     ``Connection``.  If bound, the [sqlalchemy.schema#Table] objects
     in the collection and their columns may participate in implicit SQL
     execution.
 
+    The `Table` objects themselves are stored in the `metadata.tables`
+    dictionary.
+
     The ``bind`` property may be assigned to dynamically.  A common pattern is
     to start unbound and then bind later when an engine is available::
 
@@ -1448,8 +1451,8 @@ class MetaData(SchemaItem):
         Defaults to False. ``bind`` is required when this option is set.
         For finer control over loaded tables, use the ``reflect`` method of
         ``MetaData``.
-        """
+
+        """
         self.tables = {}
         self.bind = bind
         self.metadata = self
@@ -1488,8 +1491,8 @@ class MetaData(SchemaItem):
           string or ``URL``, will be passed to ``create_engine()`` along with
          ``\**kwargs`` to produce the engine to which to connect.  Otherwise
           connects directly to the given ``Engine``.
+
         """
-        global URL
         if URL is None:
             from sqlalchemy.engine.url import URL
@@ -1505,6 +1508,7 @@ class MetaData(SchemaItem):
         This property may be assigned an ``Engine`` or ``Connection``, or
         assigned a string or URL to automatically create a basic ``Engine``
         for this bind with ``create_engine()``.
+
         """
         return self._bind
@@ -1523,14 +1527,21 @@ class MetaData(SchemaItem):
     bind = property(bind, _bind_to)
 
     def clear(self):
+        """Clear all Table objects from this MetaData."""
+        # TODO: why have clear()/remove() but not all
+        # other accessors/mutators for the tables dict ?
         self.tables.clear()
 
     def remove(self, table):
+        """Remove the given Table object from this MetaData."""
+        # TODO: scan all other tables and remove FK _column
         del self.tables[table.key]
 
     @util.deprecated('Deprecated. Use ``metadata.sorted_tables``')
     def table_iterator(self, reverse=True, tables=None):
+        """Deprecated. Use ``metadata.sorted_tables``."""
+
         from sqlalchemy.sql.util import sort_tables
         if tables is None:
             tables = self.tables.values()
@@ -1578,8 +1589,8 @@ class MetaData(SchemaItem):
           filter the list of potential table names.  The callable is called
           with a table name and this ``MetaData`` instance as positional
          arguments and should return a true value for any table to reflect.
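+
+          For example, to reflect only those tables whose names begin
+          with ``user_`` (an illustrative sketch)::
+
+            meta.reflect(bind=someengine,
+                         only=lambda name, md: name.startswith('user_'))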
-        """
+
+        """
         reflect_opts = {'autoload': True}
         if bind is None:
             bind = _bind_or_error(self)
@@ -1644,8 +1655,8 @@ class MetaData(SchemaItem):
             # triggers MetaData listeners too:
             some.table.create()
 
-        """
+
+        """
         if event not in self.ddl_events:
             raise LookupError(event)
         self.ddl_listeners[event].append(listener)
@@ -1694,8 +1705,8 @@ class MetaData(SchemaItem):
         checkfirst
           Defaults to True; only issue DROPs for tables confirmed to be present
           in the target database.
-        """
+
+        """
         if bind is None:
             bind = _bind_or_error(self)
         for listener in self.ddl_listeners['before-drop']:
-- 
2.47.3