From 751d2b340b407c288b1c789bebd972d445267e46 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 15 Jul 2007 01:50:27 +0000 Subject: [PATCH] more query methods, overhauliung docs for forwards 0.4 method --- doc/build/content/datamapping.txt | 417 +++++++++++--------------- doc/build/content/dbengine.txt | 9 +- doc/build/content/metadata.txt | 68 +---- doc/build/content/sqlconstruction.txt | 45 +-- doc/build/content/tutorial.txt | 8 +- doc/build/content/unitofwork.txt | 41 +-- lib/sqlalchemy/ansisql.py | 4 +- lib/sqlalchemy/engine/base.py | 8 +- lib/sqlalchemy/orm/query.py | 85 +++++- lib/sqlalchemy/orm/session.py | 14 +- test/orm/query.py | 30 ++ 11 files changed, 359 insertions(+), 370 deletions(-) diff --git a/doc/build/content/datamapping.txt b/doc/build/content/datamapping.txt index b1fddfcd99..b738250317 100644 --- a/doc/build/content/datamapping.txt +++ b/doc/build/content/datamapping.txt @@ -8,17 +8,7 @@ Data Mapping {@name=datamapping} Data mapping describes the process of defining *Mapper* objects, which associate table metadata with user-defined classes. -The `Mapper`'s role is to perform SQL operations upon the database, associating individual table rows with instances of those classes, and individual database columns with properties upon those instances, to transparently associate in-memory objects with a persistent database representation. - -When a `Mapper` is created to associate a `Table` object with a class, all of the columns defined in the `Table` object are associated with the class via property accessors, which add overriding functionality to the normal process of setting and getting object attributes. These property accessors keep track of changes to object attributes; these changes will be stored to the database when the application "flushes" the current state of objects (known as a *Unit of Work*). - -Two objects provide the primary interface for interacting with Mappers and the "unit of work", which are the `Query` object and the `Session` object. `Query` deals with selecting objects from the database, whereas `Session` provides a context for loaded objects and the ability to communicate changes on those objects back to the database. - -The primary method on `Query` for loading objects is its `select()` method, which has similar arguments to a `sqlalchemy.sql.Select` object. But this select method executes automatically and returns results, instead of awaiting an execute() call. Instead of returning a cursor-like object, it returns an array of objects. - -The three configurational elements to be defined, i.e. the `Table` metadata, the user-defined class, and the `Mapper`, are typically defined as module-level variables, and may be defined in any fashion suitable to the application, with the only requirement being that the class and table metadata are described before the mapper. For the sake of example, we will be defining these elements close together, but this should not be construed as a requirement; since SQLAlchemy is not a framework, those decisions are left to the developer or an external framework. - -Also, keep in mind that the examples in this section deal with explicit `Session` objects mapped directly to `Engine` objects, which represents the most explicit style of using the ORM. 
Options exist for how this is configured, including binding `Table` objects directly to `Engines` (described in [metadata_tables_binding](rel:metadata_tables_binding)), as well as using an auto-generating "contextual" session via the SessionContext plugin (described in [plugins_sessioncontext](rel:plugins_sessioncontext)). +When a `Mapper` is created to associate a `Table` object with a class, all of the columns defined in the `Table` object are associated with the class via property accessors, which add overriding functionality to the normal process of setting and getting object attributes. These property accessors keep track of changes to object attributes; these changes will be stored to the database when the application "flushes" the current state of objects. This pattern is called a *Unit of Work* pattern. ### Synopsis {@name=synopsis} @@ -52,13 +42,13 @@ Thats all for configuration. Next, we will create an `Engine` and bind it to a engine = create_engine("sqlite://mydb.db") # session - session = create_session(bind_to=engine) + session = create_session(bind=engine) -The `session` represents a "workspace" which can load objects and persist changes to the database. A `Session` [[doc](rel:unitofwork)] [[api](rel:docstrings_sqlalchemy.orm.session_Session)] is best created as local to a particular set of related data operations, such as scoped within a function call, or within a single application request cycle. Next we illustrate a rudimental query which will load a single object instance. We will modify one of its attributes and persist the change back to the database. +The `session` represents a "workspace" which can load objects and persist changes to the database. Note also that the `bind` parameter is optional; if the underlying `Table` objects are bound as described in [metadata_tables_binding](rel:metadata_tables_binding), it's not needed. A `Session` [[doc](rel:unitofwork)] [[api](rel:docstrings_sqlalchemy.orm.session_Session)] is best created as local to a particular set of related data operations, such as scoped within a function call, or within a single application request cycle. Next we illustrate a rudimental query which will load a single object instance. We will modify one of its attributes and persist the change back to the database. {python} # select - {sql}user = session.query(User).selectfirst_by(user_name='fred') + {sql}user = session.query(User).filter_by(user_name='fred').first() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.fullname AS users_fullname, users.password AS users_password FROM users @@ -81,7 +71,7 @@ Things to note from the above include that the loaded `User` object has an attri ### The Query Object {@name=query} -The method `session.query(class_or_mapper)` returns a `Query` object [[api](rel:docstrings_sqlalchemy.orm.query_Query)]. `Query` implements a large set of methods which are used to produce and execute select statements tailored for loading object instances. It returns objects in most cases either as lists of objects or as single instances, depending on the type of query issued. +The method `session.query(class_or_mapper)` returns a `Query` object [[api](rel:docstrings_sqlalchemy.orm.query_Query)] (note that the API described here is the 0.4 API; the 0.3 API is still present and described in the generated documentation). `Query` implements methods which are used to produce and execute select statements tailored for loading object instances. In all cases, objects, not fetchable result sets, are returned. 
The objects may be in a list, or it may be a single object. In some cases each returned object may actually be a tuple of multiple objects. A `Query` is always initially generated starting with the `Session` that we're working with, and is always created relative to a particular class, which is the primary class we wish to load. @@ -97,21 +87,29 @@ Alternatively, an actual `Mapper` instance can be specified instead of a class: # create query for the User query = session.query(usermapper) - -Dealing with mappers explicitly as above is usually not needed except for more advanced patterns where a class may have multiple mappers associated with it. -Once we have a query, we can start loading objects. The two most rudimental and general purpose methods are `select()` and `select_by()`. `select()` is oriented towards query criterion constructed as a `ClauseElement` object, which is the kind of object generated when constructing SQL expressions as described in the [SQL](rel:sql) section. `select_by()` can also accomodate `ClauseElement` objects but is generally more oriented towards keyword arguments which correspond to mapped attribute names. In both cases, the criterion specified is used to construct the `WHERE` criterion of the generated SQL. The `Query` object will use this criterion to **compile** the full SQL query issued to the database, combining the `WHERE` condition with the appropriate column selection clauses and `FROM` criterion, incuding whatever modifications are required to control ordering, number of rows returned, joins for loading related objects, etc. +A query which joins across multiple tables may also be used to request multiple entities, such as: -The general form of `select_by()` is: + {python} + query = session.query(User, Address) + +Once we have a query, we can start loading objects. The methods `filter()` and `filter_by()` handle narrowing results, and the methods `all()`, `one()`, and `first()` exist to return all, exactly one, or the first result of the total set of results. Note that all methods are *generative*, meaning that on each call that doesn't return results, you get a **new** `Query` instance. + +The `filter_by()` method works with keyword arguments, which are combined together via AND: {python} - def select_by(self, *clause_elements, **keyword_criterion) + {sql}result = session.query(User).filter_by(name='john', fullname='John Smith').all() + SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, + users.fullname AS users_fullname, users.password AS users_password + FROM users + WHERE users.user_name = :users_user_name AND users.fullname = :users_fullname + ORDER BY users.oid + {'users_user_name': 'john', 'users_fullname': 'John Smith'} -Where `*clause_elements` is a set of zero or more `ClauseElement` objects, and `**keyword_criterion` are key/value pairs each of which correspond to a simple equality comparison. The full set of clause elements and key/value pairs are joined together in the resulting SQL statement via `AND`. +Whereas `filter()` works with constructed SQL expressions, i.e. 
those described in [sql](rel:sql):

    {python}
-    # using select_by with keyword arguments
-    {sql}result = query.select_by(name='john', fullname='John Smith')
+    {sql}result = session.query(User).filter(users_table.c.name=='john').all()
    SELECT users.user_id AS users_user_id, users.user_name AS users_user_name,
    users.fullname AS users_fullname, users.password AS users_password
    FROM users
@@ -119,85 +117,131 @@ Where `*clause_elements` is a set of zero or more `ClauseElement` objects, and `
    ORDER BY users.oid
    {'users_user_name': 'john', 'users_fullname': 'John Smith'}

-    # using select_by with preceding ClauseElements followed by keywords
-    result = query.select_by(users_table.c.user_id>224,
-    {sql}    users_table.c.user_name=='john', fullname='John Smith')
+Sometimes, constructing SQL via expressions can be cumbersome. For quick SQL expressions, the `filter()` method can also accommodate straight text:
+
+    {python}
+    {sql}result = session.query(User).filter("user_id>224").all()
    SELECT users.user_id AS users_user_id, users.user_name AS users_user_name,
    users.fullname AS users_fullname, users.password AS users_password
    FROM users
-    WHERE users.user_id>:users_user_id AND users.user_name = :users_user_name
-    AND users.fullname = :users_fullname
+    WHERE users.user_id>224
    ORDER BY users.oid
-    {'users_user_name': 'john', 'users_fullname': 'John Smith', 'users_user_id': 224}
+    {}

-Note that a `ClauseElement` is generated by each boolean operator (i.e. `==`, `>`) that's performed against a `Column` object - recall that this operator is overloaded to return a binary clause construct. But the `fullname="John Smith"` argument is not using any kind of overloading, and is a regular Python keyword argument. Additionally, while `ClauseElements` are constructed against the `Column` elements configured on the mapped `Table`, the keyword-based criterion is constructed against the class-level attribute names which were configured by the mapper. While they are the same name as their columns in this particular example, they can be configured to have names distinct from their columns. So it follows that using `ClauseElements` for criterion are closer to the relational side of things, and using keyword arguments are closer towards the domain object side of things.
+When using text, bind parameters can be specified the same way as in a `text()` clause, using a colon. To specify the bind parameter values, use the `params()` method:

-The `select()` method, unlike the `select_by()` method, is purely `ClauseElement`/relationally oriented and has no domain-level awareness. Its basic argument signature:
+    {python}
+    {sql}result = session.query(User).filter("user_id>:value").params(value=224).all()
+    SELECT users.user_id AS users_user_id, users.user_name AS users_user_name,
+    users.fullname AS users_fullname, users.password AS users_password
+    FROM users
+    WHERE users.user_id>:value
+    ORDER BY users.oid
+    {'value': 224}
+
+Multiple `filter()` and `filter_by()` expressions may be combined together. The resulting statement groups them using AND.

    {python}
-    def select(self, clause_element, **additional_options)
+    result = session.query(User).filter(users_table.c.user_id>224).filter_by(name='john').
+        {sql}    filter(users.c.fullname=='John Smith').all()
    SELECT users.user_id AS users_user_id, users.user_name AS users_user_name,
    users.fullname AS users_fullname, users.password AS users_password
    FROM users
    WHERE users.user_id>:users_user_id AND users.user_name = :users_user_name
    AND users.fullname = :users_fullname
    ORDER BY users.oid
    {'users_user_name': 'john', 'users_fullname': 'John Smith', 'users_user_id': 224}

-The `clause_element` argument again represents a `ClauseElement` which corresponds to the `WHERE` criterion of a `SELECT` statement. Here, there is only a single `ClauseElement` and it will be used to form the entire set of criterion. A basic `select()` operation looks like this:
+Note that all conjunctives are available explicitly, such as `and_()` and `or_()`, when using `filter()`:

    {python}
-    result = query.select(users_table.c.user_name=='john')
+    result = session.query(User).filter(and_(users_table.c.user_id>224, or_(users_table.c.name=='john', users_table.c.name=='ed')))

-To generate `AND` criterion the way `select_by()` does, you use the `and_()` construct from the sql construction system, which generates just another `ClauseElement` containing two sub-`ClauseElement`s:
+It's also straightforward to use an entirely string-based statement, using `from_statement()`; just ensure that the columns clause of the statement contains the column names normally used by the mapper (here illustrated using an asterisk):

    {python}
-    result = query.select(and_(users_table.c.user_name=='john',
-        users_table.c.fullname=='John Smith'))
+    {sql}result = session.query(User).from_statement("select * from users").all()
+    select * from users
+    {}

-From this, one can see that `select()` is stricter in its operation and does not make any kind of assumptions about how to join multiple criterion together. Of course in all cases where `ClauseElement` is used, any expression can be created using combinations of `and_()`, `or_()`, `not_()`, etc.
+`from_statement()` can also accomodate `select()` constructs: {python} - result = query.select( - or_(users_table.c.user_name == 'john', users_table.c.user_name=='fred') - {sql}) + {sql}result = session.query(User).from_statement(select([users], users.c.name<'e', + having=users.c.name==func.max(users.c.name), group_by=[c for c in users.c])).all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.fullname AS users_fullname, users.password AS users_password FROM users - WHERE users.user_name = :users_user_name OR users.user_name = :users_user_name_1 + WHERE users.user_name>:users_user_name HAVING users.user_name == max(users.user_name) + GROUP BY users.user_id, users.user_name, users.fullname, users.password ORDER BY users.oid - {'users_user_name': 'john', 'users_user_name_1': 'fred'} + {'users_user_name': 'e'} + +Any set of filtered criterion (or no criterion) can be distilled into a rowcount statement using `count()`: + + {python} + {sql}num = session.query(Users).filter(users_table.c.user_id>224).count() + SELECT count(users.id) FROM users WHERE users.user_id>:users_user_id + {'users_user_id': 224} -The keyword portion of `select()` is used to indicate further modifications to the generated SQL, including common arguments like ordering, limits and offsets: +Rows are limited and offset using `limit()` and `offset()`: {python} - result = query.select(users_table.c.user_name=='john', - {sql} order_by=[users_table.c.fullname], limit=10, offset=12) + {sql}result = session.query(User).limit(20).offset(5).all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.fullname AS users_fullname, users.password AS users_password - FROM users - WHERE users.user_name = :users_user_name - ORDER BY users.fullname LIMIT 10 OFFSET 12 - {'users_user_name': 'john'} - -#### Issuing Full SELECT statements {@name=fullselect} + FROM users ORDER BY users.oid + LIMIT 20 OFFSET 5 + {} -`select()` also provides an additional calling style, which is to pass a fully constructed `Select` construct to it. +And ordering is applied, using `Column` objects and related SQL constructs, with `order_by()`: {python} - # using a full select object - {sql}result = query.select(users_table.select(users_table.c.user_name=='john')) + {sql}result = session.query(User).order_by(desc(users_table.c.user_name)).all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.fullname AS users_fullname, users.password AS users_password - FROM users - WHERE users.user_name = :users_user_name - {'users_user_name': 'john'} + FROM users ORDER BY users.user_name DESC + {} + +The `first()` and `one()` methods will also limit rows. In the case of `first()`, rows are limited to just one, and the result is returned as a scalar. In the case of `one()`, rows are limited to *two*; however, only one is returned. If two rows are matched, an exception is raised. -When a full select is passed, the `Query` object does not generate its own SQL. Instead, the select construct passed in is used without modification, except that its `use_labels` flag is switched on to prevent column name collisions. Therefore its expected that the construct will include the proper column clause as appropriate to the mapped class being loaded. 
+ {python} + # load the first result + user = session.query(User).first() + + # load exactly *one* result - if more than one result matches, an exception is raised + user = session.query(User).filter_by(name='jack').one() -The techniques used when querying with a full select construct are similar to another query method called `instances()`, which is described in [advdatamapping_resultset](rel:advdatamapping_resultset). +The `Query`, when evaluated as in an iterative context, executes results immediately, using whatever state has been built up: -#### Two General Calling Styles {@name=callingstyles} + {python} + {sql}result = list(session.query(User)) + SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, + users.fullname AS users_fullname, users.password AS users_password + FROM users ORDER BY users.oid + {} + +Array indexes and slices work too: -The twin calling styles presented by `select()` and `select_by()` are mirrored in several other methods on `Query`. For each method that indicates a verb such as `select()` and accepts a single `ClauseElement` followed by keyword-based options, the `_by()` version accepts a list of `ClauseElement` objects followed by keyword-based argument criterion. These include: + {python} + {sql}result = list(session.query(User)[1:3]) + SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, + users.fullname AS users_fullname, users.password AS users_password + FROM users ORDER BY users.oid + LIMIT 2 OFFSET 1 + {} -* `selectfirst()` / `selectfirst_by()` - select with LIMIT 1 and return a single object instance -* `selectone()` / `selectone_by()` - select with LIMIT 2, assert that only one row is present, and return a single object instance -* `filter()` / `filter_by()` - apply the criterion to the `Query` object generatively, and return a new `Query` object with the criterion built in. -* `count()` / `count_by()` - return the total number of object instances that would be returned. +A scalar index returns a scalar result immediately: + + {python} + {sql}user = session.query(User)[2] + SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, + users.fullname AS users_fullname, users.password AS users_password + FROM users ORDER BY users.oid + LIMIT 1 OFFSET 2 + {} + +Later in this chapter, we'll discuss how to configure relations between mapped classes. Once that's done, we'll discuss how to use table joins in [datamapping_joins](rel:datamapping_joins). #### Loading by Primary Key {@name=primarykey} @@ -209,7 +253,7 @@ The `get()` method loads a single instance, given the primary key value of the d The `get()` method, because it has the actual primary key value of the instance, can return an already-loaded instance from the `Session` without performing any SQL. It is the only method on `Query` that does not issue SQL to the database in all cases. -To issue a composite primary key to `get()`, use a tuple. The order of the arguments matches that of the table meta data. +To issue a composite primary key to `get()`, use a tuple. The order of the arguments matches that of the primary key columns of the table: {python} myobj = query.get((27, 3, 'receipts')) @@ -221,61 +265,9 @@ Another special method on `Query` is `load()`. This method has the same signatu Some of the above examples above illustrate the usage of the mapper's Table object to provide the columns for a WHERE Clause. These columns are also accessible off of the mapped class directly. 
When a mapper is assigned to a class, it also attaches a special property accessor `c` to the class itself, which can be used just like the table metadata to access the columns of the table: {python} - userlist = session.query(User).select(User.c.user_id==12) - -#### Generative Query Methods {@name=generative} - -The `filter()` and `filter_by()` methods on `Query` are known as "generative" methods, in that instead of returning results, they return a newly constructed `Query` object which contains the "filter" criterion as part of its internal state. This is another method of stringing together criterion via `AND` clauses. But when using the `Query` generatively, it becomes a much more flexible object, as there are generative methods for adding all kinds of criterion and modifiers, some of which are relationally oriented and others which are domain oriented. The key behavior of a "generative" method is that calling it produces a **new** `Query` object, which contains all the attributes of the previous `Query` object plus some new modifications. - - {python} - # create a Query - query = session.query(User) - - # filter by user_name property using filter_by() - query = query.filter_by(user_name="john") - - # filter by email address column using filter() - query = query.filter(User.c.fullname=="John Smith") - - # execute - the list() method returns a list. this is equivalent to just saying query.select() - {sql}result = query.list() - SELECT users.user_name AS users_user_name, users.fullname as users_fullname, - users.password AS users_password, users.user_id AS users_user_id - FROM users - WHERE ((users.user_name = :users_user_name) - AND users.fullname = :users_fullname) ORDER BY users.oid + userlist = session.query(User).filter(User.c.user_id==12).first() -Other generative behavior includes list-based indexing and slice operations, which translate into `LIMIT` and `OFFSET` criterion: - - {python} - {sql}session.query(User).filter(user_table.c.fullname.like('j%'))[20:30] - SELECT users.user_name AS users_user_name, users.fullname as users_fullname, - users.password AS users_password, users.user_id AS users_user_id - FROM users - WHERE users.fullname LIKE :users_fullname - ORDER BY users.oid LIMIT 10 OFFSET 20 - {'users_fullname': 'j%'} - -Iterable behavior: - - {python} - for user in session.query(User).filter_by(user_name='john'): - # etc. - -Applying modifiers generatively: - - {python} - {sql}session.query(User).filter(user_table.c.fullname.like('j%')). - limit(10).offset(20).order_by(user_table.c.user_id).list() - SELECT users.user_name AS users_user_name, users.fullname as users_fullname, - users.password AS users_password, users.user_id AS users_user_id - FROM users - WHERE users.fullname LIKE :users_fullname - ORDER BY users.user_id LIMIT 10 OFFSET 20 - -There is no restriction to mixing the "generative" methods like `filter()`, `join()`, `order_by()` etc. with the "non-generative" methods like `select()` and `select_by()`. The only difference is that `select...` methods return results immediately, whereas generative methods return a new `Query` from which results can be retrieved using any number of methods such as `list()`, `select()`, `select_by()`, `selectfirst()`, etc. Options that are specified to the `select...` methods build upon options that were already built up generatively. - -The `Query` object will be described further in subsequent sections dealing with joins. 
Also, be sure to consult the full generated documentation for `Query`: [Query](rel:docstrings_sqlalchemy.orm.query_Query). +In version 0.4 of SQLAlchemy, the "c" prefix will no longer be needed. ### Saving Objects {@name=saving} @@ -304,7 +296,7 @@ When a mapper is created, the target class has its mapped properties decorated b sess.save(myuser2) # load a third User from the database - {sql}myuser3 = sess.query(User).select(User.c.user_name=='fred')[0] + {sql}myuser3 = sess.query(User).filter_by(name='fred').all()[0] SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.password AS users_password FROM users WHERE users.user_name = :users_user_name @@ -322,8 +314,6 @@ When a mapper is created, the target class has its mapped properties decorated b INSERT INTO users (user_name, password) VALUES (:user_name, :password) {'password': 'lalalala', 'user_name': 'ed'} -The requirement that new instances be explicitly stored in the `Session` via `save()` operation can be modified by using the [plugins_sessioncontext](rel:plugins_sessioncontext) extension module. - The mapped class can also specify whatever methods and/or constructor it wants: {python} @@ -351,8 +341,6 @@ The mapped class can also specify whatever methods and/or constructor it wants: Note that the **__init__() method is not called when the instance is loaded**. This is so that classes can define operations that are specific to their initial construction which are not re-called when the object is restored from the database, and is similar in concept to how Python's `pickle` module calls `__new__()` when deserializing instances. To allow `__init__()` to be called at object load time, or to define any other sort of on-load operation, create a `MapperExtension` which supplies the `create_instance()` method (see [advdatamapping_extending](rel:advdatamapping_extending), as well as the example in the FAQ). -SQLAlchemy will only place columns into UPDATE statements for which the value of the attribute has changed. This is to conserve database traffic and also to successfully interact with a "deferred" attribute, which is a mapped object attribute against the mapper's primary table that isnt loaded until referenced by the application. - ### Defining and Using Relationships {@name=relations} So that covers how to map the columns in a table to an object, how to load objects, create new ones, and save changes. The next step is how to define an object's relationships to other database-persisted objects. This is done via the `relation` function [[doc](rel:advdatamapping_properties_relationoptions)][[api](rel:docstrings_sqlalchemy.orm_modfunc_relation)] provided by the `orm` module. @@ -413,9 +401,11 @@ Lets do some operations with these classes and see what happens: {python} engine = create_engine('sqlite:///mydb.db') + + # create tables metadata.create_all(engine) - session = create_session(bind_to=engine) + session = create_session(bind=engine) u = User('jane', 'hihilala') u.addresses.append(Address('123 anywhere street', 'big city', 'UT', '76543')) @@ -432,7 +422,7 @@ Lets do some operations with these classes and see what happens: A lot just happened there! The `Mapper` figured out how to relate rows in the addresses table to the users table, and also upon flush had to determine the proper order in which to insert rows. After the insert, all the `User` and `Address` objects have their new primary and foreign key attributes populated. 
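As a quick sketch of that last point (assuming the `User` and `Address` mappings and the `u` instance just flushed above), the newly assigned identifiers are plain attributes after the flush:

    {python}
    # the parent row's primary key was assigned by the database during the flush
    print u.user_id

    # each child Address received its own primary key, plus a foreign key
    # value matching the parent's user_id
    print u.addresses[0].address_id, u.addresses[0].user_id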
-Also notice that when we created a `Mapper` on the `User` class which defined an `addresses` relation, the newly created `User` instance magically had an "addresses" attribute which behaved like a list. This list is in reality a Python `property` which will return an instance of `sqlalchemy.orm.attributes.InstrumentedList`. This is a generic collection-bearing object which can represent lists, sets, dictionaries, or any user-defined collection class which has an `append()` method. By default it represents a list: +Also notice that when we created a `Mapper` on the `User` class which defined an `addresses` relation, the newly created `User` instance magically had an "addresses" attribute which behaved like a list. This list is in reality a Python `property` which will return an instance of `sqlalchemy.orm.attributes.InstrumentedList`. This is a generic collection-bearing object which can represent lists, sets, dictionaries, or any user-defined collection class. By default it represents a list: {python} del u.addresses[1] @@ -523,47 +513,16 @@ The `backref()` function is often used to set up a bi-directional one-to-one rel mapper(Address, addresses_table, properties={ 'user' : relation(User, backref=backref('address', uselist=False)) }) - - -### Selecting from Relationships {@name=selectrelations} - -We've seen how the `relation` specifier affects the saving of an object and its child items, how does it affect selecting them? By default, the relation keyword indicates that the related property should be attached a *lazy loader* when instances of the parent object are loaded from the database; this is just a callable function that when accessed will invoke a second SQL query to load the child objects of the parent. - - {python} - # define a mapper - mapper(User, users_table, properties = { - 'addresses' : relation(mapper(Address, addresses_table)) - }) - - # select users where username is 'jane', get the first element of the list - # this will incur a load operation for the parent table - {sql}user = session.query(User).select(User.c.user_name=='jane')[0] - SELECT users.user_id AS users_user_id, - users.user_name AS users_user_name, users.password AS users_password - FROM users WHERE users.user_name = :users_user_name ORDER BY users.oid - {'users_user_name': 'jane'} - - # iterate through the User object's addresses. this will incur an - # immediate load of those child items - {sql}for a in user.addresses: - SELECT addresses.address_id AS addresses_address_id, - addresses.user_id AS addresses_user_id, addresses.street AS addresses_street, - addresses.city AS addresses_city, addresses.state AS addresses_state, - addresses.zip AS addresses_zip FROM addresses - WHERE addresses.user_id = :users_user_id ORDER BY addresses.oid - {'users_user_id': 1} - print repr(a) - -#### Selecting With Joins {@name=queryjoins} +### Querying with Joins {@name=joins} When using mappers that have relationships to other mappers, the need to specify query criterion across multiple tables arises. SQLAlchemy provides several core techniques which offer this functionality. -When specifying columns to the `select()` method (including variants like `selectfirst()`, `selectone()`, etc.) or the generative `filter()` method of `Query`, if the columns are attached to a table other than the mapped table, that table is automatically added to the "FROM" clause of the query. This is the same behavior that occurs when creating a non-ORM `select` object. 
Using this feature, joins can be created when querying: +One way is just to build up the join criterion yourself. This is easy to do using `filter()`: {python} - {sql}l = session.query(User).select(and_(users.c.user_id==addresses.c.user_id, - addresses.c.street=='123 Green Street')) + {sql}l = session.query(User).filter(users.c.user_id==addresses.c.user_id). + filter(addresses.c.street=='123 Green Street').all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.password AS users_password FROM users, addresses @@ -576,7 +535,7 @@ Above, we specified selection criterion that included columns from both the `use {python} # this is usually not what you want to do - {sql}l = session.query(User).select(addresses.c.street=='123 Green Street') + {sql}l = session.query(User).filter(addresses.c.street=='123 Green Street').all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.password AS users_password FROM users, addresses @@ -586,11 +545,11 @@ Above, we specified selection criterion that included columns from both the `use The above join will return all rows of the `users` table, even those that do not correspond to the `addresses` table, in a **cartesian product** with the matching rows from the `addresses` table. -Another way to specify joins more explicitly is to use the `from_obj` parameter of `select()`, or the generative `select_from()` method. These allow you to explicitly place elements in the FROM clause of the query, which could include lists of tables and/or `Join` constructs: +A way to specify joins very explicitly, using the SQL `join()` construct, is possible via the `select_from()` method on `Query`: {python} - {sql}l = session.query(User).select(addresses_table.c.street=='123 Green Street', - from_obj=[users_table.join(addresses_table)]) + {sql}l = session.query(User).select_from(users_table.join(addresses_table)). + filter(addresses_table.c.street=='123 Green Street').all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.password AS users_password FROM users JOIN addresses ON users.user_id=addresses.user_id @@ -598,13 +557,11 @@ Another way to specify joins more explicitly is to use the `from_obj` parameter ORDER BY users.oid {'addresses_street', '123 Green Street'} -In the above example, the `join()` function by default creates a natural join between the two tables, so we were able to avoid having to specify the join condition between `users` and `addresses` explicitly. - -Using a generative approach: +But the easiest way of all is automatically, using the `join()` method on `Query`. Just give this method the path from A to B, using the name of a mapped relationship directly: {python} - {sql}l = session.query(User).filter(addresses_table.c.street=='123 Green Street'). - select_from(users_table.join(addresses_table)).list() + {sql}l = session.query(User).join('addresses'). + filter(addresses_table.c.street=='123 Green Street').all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.password AS users_password FROM users JOIN addresses ON users.user_id=addresses.user_id @@ -612,42 +569,34 @@ Using a generative approach: ORDER BY users.oid {'addresses_street', '123 Green Street'} -Note that `select_from()` takes either a single scalar element or a list of selectables (i.e. `select_from([table1, table2, table3.join(table4), ...])`), which are added to the current list of "from" elements. 
As is the behavior of constructed SQL, `join` elements used in the `from_obj` parameter or the `select_from()` method will replace instances of the individual tables they represent. - -#### Creating Joins Using filter\_by() and select\_by() {@name=relselectby} - -Another way that joins can be created is by using the `select_by` method or the generative `filter_by` methods of `Query`, which have the ability to create joins across relationships automatically. These methods are in many circumstances more convenient than, but not as flexible as, the more SQL-level approach using the `select()`/`select_from()` methods described in the previous section. - -To issue a join using `select_by()`, just specify a key in the keyword-based argument list which is not present in the primary mapper's list of properties or columns, but *is* present in the property list of some relationship down the line of objects. The `Query` object will recursively traverse along the mapped relationships starting with the lead class and descending into child classes, until it finds a property matching the given name. For each new mapper it encounters along the path to the located property, it constructs a join across that relationship: +In all cases, we can get both the `User` and the `Address` object back, by telling the session we want both. This returns the results as a tuple: {python} - {sql}l = session.query(User).select_by(street='123 Green Street') - SELECT users.user_id AS users_user_id, - users.user_name AS users_user_name, users.password AS users_password - FROM users, addresses - WHERE users.user_id=addresses.user_id - AND addresses.street=:addresses_street - ORDER BY users.oid - {'addresses_street', '123 Green Street'} + l = session.query(User, Address).join('addresses'). + filter(addresses_table.c.street=='123 Green Street').all() + for result in l: + print "User:", l[0] + print "Address:", l[1] + +The above syntax is shorthand for using the `add_entity()` method: -The above example is shorthand for: + session.query(User).add_entity(Address).join('addresses').all() + +To join across multiple relationships, specify them in a list. Below, we load a `ShoppingCart`, limiting its `cartitems` collection to the single item which has a `price` object whose `amount` column is 47.95: {python} - l = session.query(User).select(and_( - Address.c.user_id==User.c.user_id, - Address.c.street=='123 Green Street') - ) + session.query(ShoppingCart).join(['cartitems', 'price']).filter_by(amount=47.95).one() -`select_by()` and its related functions can compare not only column-based attributes to column-based values, but also relations to object instances: +`filter_by()` can also generate joins in some cases, such as when comparing to an object instance: {python} # get an instance of Address. assume its primary key identity # is 12. 
- someaddress = session.query(Address).get_by(street='123 Green Street') + someaddress = session.query(Address).filter_by(street='123 Green Street').one() # look for User instances which have the # "someaddress" instance in their "addresses" collection - {sql}l = session.query(User).select_by(addresses=someaddress) + {sql}l = session.query(User).filter_by(addresses=someaddress).all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, users.password AS users_password FROM users, addresses @@ -656,67 +605,59 @@ The above example is shorthand for: ORDER BY users.oid {'addresses_addresses_id': 12} -Where above, the comparison denoted by `addresses=someaddress` is constructed by comparing all the primary key columns in the `Address` mapper to each corresponding primary key value in the `someaddress` entity. In other words, its equivalent to saying `select_by(address_id=someaddress.address_id)`. - -#### Generating Join Criterion Using join() {@name=jointo} - -The `join()` method is a generative method which can apply join conditions to a `Query` based on the names of relationships, using a similar mechanism as that of `select_by()` and similar methods. By specifying the string name of a relation as its only argument, the resulting `Query` will automatically join from the starting class' mapper to the target mapper, indicated by searching for a relationship of that name along the relationship path. - - {python} - {sql}l = session.query(User).join('addresses').list() - SELECT users.user_name AS users_user_name, users.password AS users_password, - users.user_id AS users_user_id - FROM users JOIN addresses ON users.user_id = addresses.user_id - ORDER BY users.oid +You can also create joins in "reverse", that is, to find an object with a certain parent. This is accomplished using `with_parent()`: -One drawback of this method of locating a relationship is that its not *deterministic*. If the same relationship name occurs on more than one traversal path, its only possible to locate one of those relationships. Similarly, if relationships are added to mapped classes, queries that worked fine may suddenly experience a similar conflict and produce unexpected results. - -So to specify a deterministic path to `join()`, send the relationship name or a path of names as a list. Such as: - {python} - l = session.query(User).join(['addresses']).list() + # load a user + someuser = session.query(User).get(2) -Where above, if the "addresses" relation is not present directly on the `User` class, an error is raised. + # load an address with that user as a parent and email address foo@bar.com + {sql}someaddresses = session.query(Address).with_parent(someuser). 
+        filter_by(email_address="foo@bar.com").all()
+    SELECT addresses.address_id AS addresses_address_id,
+    addresses.user_id AS addresses_user_id, addresses.street AS addresses_street,
+    addresses.city AS addresses_city, addresses.state AS addresses_state,
+    addresses.zip AS addresses_zip FROM addresses
+    WHERE addresses.email_address = :addresses_email_address AND
+    addresses.user_id = :users_user_id ORDER BY addresses.oid
+    {'users_user_id': 1, 'addresses_email_address': 'foo@bar.com'}

+### Loading Relationships {@name=selectrelations}

+We've seen how the `relation` specifier affects the saving of an object and its child items, and also how it allows us to build joins. How do we get the actual related items loaded? By default, the `relation()` function indicates that the related property should be attached a *lazy loader* when instances of the parent object are loaded from the database; this is just a callable function that when accessed will invoke a second SQL query to load the child objects of the parent.

+    {python}
+    # define a user mapper
+    mapper(User, users_table, properties = {
+        'addresses' : relation(Address)
+    })

+    # define an address mapper
+    mapper(Address, addresses_table)

+    # select users where username is 'jane', get the first element of the list
+    # this will incur a load operation for the parent table
+    {sql}user = session.query(User).filter(User.c.user_name=='jane')[0]
+    SELECT users.user_id AS users_user_id,
+    users.user_name AS users_user_name, users.password AS users_password
+    FROM users WHERE users.user_name = :users_user_name ORDER BY users.oid
+    {'users_user_name': 'jane'}

+    # iterate through the User object's addresses.
this will incur an + # immediate load of those child items + {sql}for a in user.addresses: SELECT addresses.address_id AS addresses_address_id, addresses.user_id AS addresses_user_id, addresses.street AS addresses_street, addresses.city AS addresses_city, addresses.state AS addresses_state, addresses.zip AS addresses_zip FROM addresses WHERE addresses.user_id = :users_user_id ORDER BY addresses.oid {'users_user_id': 1} - - # filter the results - {sql}someaddresses = session.query(Address).with_parent(someuser). - filter_by(email_address="foo@bar.com").list() - SELECT addresses.address_id AS addresses_address_id, - addresses.user_id AS addresses_user_id, addresses.street AS addresses_street, - addresses.city AS addresses_city, addresses.state AS addresses_state, - addresses.zip AS addresses_zip FROM addresses - WHERE addresses.email_address = :addresses_email_address AND - addresses.user_id = :users_user_id ORDER BY addresses.oid - {'users_user_id': 1, 'addresses_email_address': 'foo@bar.com'} - + + print repr(a) #### Eager Loading {@name=eagerload} -Eager Loading describes the loading of parent and child objects across a relation using a single query. The purpose of eager loading is strictly one of performance enhancement; eager loading has **no impact** on the results of a query, except that when traversing child objects within the results, lazy loaders will not need to issue separate queries to load those child objects. +Eager Loading is another way for relationships to be loaded. It describes the loading of parent and child objects across a relation using a single query. The purpose of eager loading is strictly one of performance enhancement; eager loading has **no impact** on the results of a query, except that when traversing child objects within the results, lazy loaders will not need to issue separate queries to load those child objects. Eager Loading is enabled on a per-relationship basis, either as the default for a particular relationship, or for a single query using query options, diff --git a/doc/build/content/dbengine.txt b/doc/build/content/dbengine.txt index 6785471667..c03d569374 100644 --- a/doc/build/content/dbengine.txt +++ b/doc/build/content/dbengine.txt @@ -158,7 +158,7 @@ The `execute()` methods on both `Engine` and `Connection` can also receive SQL c The above SQL construct is known as a `select()`. The full range of SQL constructs available are described in [sql](rel:sql). -Both `Connection` and `Engine` fulfill an interface known as `Connectable` which specifies common functionality between the two objects, namely being able to call `connect()` to return a `Connection` object (`Connection` just returns itself), and being able to call `execute()` to get a result set. Following this, most SQLAlchemy functions and objects which accept an `Engine` as a parameter or attribute with which to execute SQL will also accept a `Connection`. In SQLAlchemy 0.3, this argument is frequently named `connectable` or `engine`. In the 0.4 series of SQLAlchemy, its consistently named `bind`. +Both `Connection` and `Engine` fulfill an interface known as `Connectable` which specifies common functionality between the two objects, namely being able to call `connect()` to return a `Connection` object (`Connection` just returns itself), and being able to call `execute()` to get a result set. Following this, most SQLAlchemy functions and objects which accept an `Engine` as a parameter or attribute with which to execute SQL will also accept a `Connection`. 
As of SQLAlchemy 0.3.9, this argument is named `bind`. {python title="Specify Engine or Connection"} engine = create_engine('sqlite:///:memory:') @@ -168,11 +168,11 @@ Both `Connection` and `Engine` fulfill an interface known as `Connectable` which table = Table('sometable', metadata, Column('col1', Integer)) # create the table with the Engine - table.create(connectable=engine) + table.create(bind=engine) # drop the table with a Connection off the Engine connection = engine.connect() - table.drop(connectable=connection) + table.drop(bind=connection) Connection facts: @@ -229,6 +229,7 @@ Note that SQLAlchemy's Object Relational Mapper also provides a way to control t Transaction Facts: * the Transaction object, just like its parent Connection, is **not threadsafe**. + * SQLAlchemy 0.4 will feature transactions with two-phase commit capability as well as SAVEPOINT capability. #### Understanding Autocommit @@ -240,7 +241,7 @@ The above transaction example illustrates how to use `Transaction` so that sever ### Connectionless Execution, Implicit Execution {@name=implicit} -Recall from the first section we mentioned executing with and without a `Connection`. `Connectionless` execution refers to calling the `execute()` method on an object which is not a `Connection`, which could be on the the `Engine` itself, or could be a constructed SQL object. When we say "implicit", we mean that we are calling the `execute()` method on an object which is neither a `Connection` nor an `Engine` object; this can only be used with constructed SQL objects which have their own `execute()` method, and can be "bound" to an `Engine`. A description of "constructed SQL objects" may be found in [sql](rel:sql). +Recall from the first section we mentioned executing with and without a `Connection`. `Connectionless` execution refers to calling the `execute()` method on an object which is not a `Connection`, which could be on the the `Engine` itself, or could be a constructed SQL object. When we say "implicit", we mean that we are calling the `execute()` method on an object which is neither a `Connection` nor an `Engine` object; this can only be used with constructed SQL objects which have their own `execute()` method, and can be "bound" to an `Engine`. A description of "constructed SQL objects" may be found in [sql](rel:sql). A summary of all three methods follows below. First, assume the usage of the following `MetaData` and `Table` objects; while we haven't yet introduced these concepts, for now you only need to know that we are representing a database table, and are creating an "executeable" SQL construct which issues a statement to the database. These objects are described in [metadata](rel:metadata). 
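For comparison, a condensed sketch of the three execution styles (not the chapter's original example, just an illustration assuming a simple `users` table whose `MetaData` is bound to an `Engine`):

    {python}
    meta = MetaData()
    meta.bind = create_engine('sqlite:///:memory:')
    users = Table('users', meta, Column('user_name', String(40)))
    meta.create_all()

    # explicit execution: use a Connection checked out from the Engine
    conn = meta.bind.connect()
    result = conn.execute(users.select())
    conn.close()

    # connectionless execution: the Engine acquires and releases a
    # connection for this single statement
    result = meta.bind.execute(users.select())

    # implicit execution: the bound construct executes itself
    result = users.select().execute()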
diff --git a/doc/build/content/metadata.txt b/doc/build/content/metadata.txt index 7088a08aed..ff854bd39c 100644 --- a/doc/build/content/metadata.txt +++ b/doc/build/content/metadata.txt @@ -96,8 +96,8 @@ And `Table` provides an interface to the table's properties as well as that of i # access the table's MetaData: employees.metadata - # access the table's Engine, if its MetaData is bound: - employees.engine + # access the table's bound Engine or Connection, if its MetaData is bound: + employees.bind # access a column's name, type, nullable, primary key, foreign key employees.c.employee_id.name @@ -117,9 +117,9 @@ And `Table` provides an interface to the table's properties as well as that of i # get the table related by a foreign key fcolumn = employees.c.employee_dept.foreign_key.column.table -#### Binding MetaData to an Engine {@name=binding} +#### Binding MetaData to an Engine or Connection {@name=binding} -A `MetaData` object can be associated with an `Engine` (or an individual `Connection`); this process is called **binding**. This allows the `MetaData` and the elements which it contains to perform operations against the database directly, using the connection resources to which it's bound. Common operations which are made more convenient through binding include being able to generate SQL constructs which know how to execute themselves, creating `Table` objects which query the database for their column and constraint information, and issuing CREATE or DROP statements. +A `MetaData` object can be associated with an `Engine` or an individual `Connection`; this process is called **binding**. The term used to describe "an engine or a connection" is often referred to as a **connectable**. Binding allows the `MetaData` and the elements which it contains to perform operations against the database directly, using the connection resources to which it's bound. Common operations which are made more convenient through binding include being able to generate SQL constructs which know how to execute themselves, creating `Table` objects which query the database for their column and constraint information, and issuing CREATE or DROP statements. To bind `MetaData` to an `Engine`, use the `connect()` method: @@ -130,7 +130,7 @@ To bind `MetaData` to an `Engine`, use the `connect()` method: meta = MetaData() # bind to an engine - meta.connect(engine) + meta.bind = engine Once this is done, the `MetaData` and its contained `Table` objects can access the database directly: @@ -212,42 +212,6 @@ Within the `MetaData` collection, this table will be identified by the combinati Note that these clauses are not supported on SQLite, and require `InnoDB` tables when used with MySQL. They may also not be supported on other databases. -#### Enabling Table / Column Quoting {@name=quoting} - -Feature Status: [Alpha Implementation][alpha_implementation] - -Many table, schema, or column names require quoting to be enabled. Reasons for this include names that are the same as a database reserved word, or for identifiers that use MixedCase, where the database would normally "fold" the case convention into lower or uppercase (such as Postgres). SQLAlchemy will attempt to automatically determine when quoting should be used. It will determine a value for every identifier name called `case_sensitive`, which defaults to `False` if the identifer name uses no uppercase letters, or `True` otherwise. This flag may be explicitly set on any schema item as well (schema items include `Table`, `Column`, `MetaData`, `Sequence`, etc.) 
to override this default setting, where objects will inherit the setting from an enclosing object if not explicitly overridden. - -When `case_sensitive` is `True`, the dialect will do what it has to in order for the database to recognize the casing. For Postgres and Oracle, this means using quoted identifiers. - -Identifiers that match known SQL reserved words (such as "asc", "union", etc.) will also be quoted according to the dialect's quoting convention regardless of the `case_sensitive` setting. - -To force quoting for an identifier, set the "quote=True" flag on `Column` or `Table`, as well as the `quote_schema=True` flag for `Table`. - - {python} - table2 = Table('WorstCase2', metadata, - # desc is a reserved word, which will be quoted. - Column('desc', Integer, primary_key=True), - - # if using a reserved word which SQLAlchemy doesn't know about, - # specify quote=True - Column('some_reserved_word', Integer, quote=True, primary_key=True), - - # MixedCase uses a mixed case convention. - # it will be automatically quoted since it is case sensitive - Column('MixedCase', Integer), - - # Union is both a reserved word and mixed case - Column('Union', Integer), - - # normal_column doesnt require quoting - Column('normal_column', String(30))) - - # to use tables where case_sensitive is False by default regardless - # of idenfifier casings, set "case_sensitive" to false at any level - # (or true to force case sensitive for lowercase identifiers as well) - lowercase_metadata = MetaData(case_sensitive=False) - #### Other Options {@name=options} `Tables` may support database-specific options, such as MySQL's `engine` option that can specify "MyISAM", "InnoDB", and other backends for the table: @@ -262,11 +226,11 @@ To force quoting for an identifier, set the "quote=True" flag on `Column` or `Ta ### Creating and Dropping Database Tables {@name=creating} -Creating and dropping individual tables can be done via the `create()` and `drop()` methods of `Table`; these methods take an optional `connectable` parameter which references an `Engine` or a `Connection`. If not supplied, the `Engine` bound to the `MetaData` will be used, else an error is raised: +Creating and dropping individual tables can be done via the `create()` and `drop()` methods of `Table`; these methods take an optional `bind` parameter which references an `Engine` or a `Connection`. If not supplied, the `Engine` bound to the `MetaData` will be used, else an error is raised: {python} meta = MetaData() - meta.connect('sqlite:///:memory:') + meta.bind = 'sqlite:///:memory:' employees = Table('employees', meta, Column('employee_id', Integer, primary_key=True), @@ -284,17 +248,17 @@ Creating and dropping individual tables can be done via the `create()` and `drop `drop()` method: {python} - {sql}employees.drop(connectable=e) + {sql}employees.drop(bind=e) DROP TABLE employees {} The `create()` and `drop()` methods also support an optional keyword argument `checkfirst` which will issue the database's appropriate pragma statements to check if the table exists before creating or dropping: {python} - employees.create(connectable=e, checkfirst=True) + employees.create(bind=e, checkfirst=True) employees.drop(checkfirst=False) -Entire groups of Tables can be created and dropped directly from the `MetaData` object with `create_all()` and `drop_all()`. These methods always check for the existence of each table before creating or dropping. Each method takes an optional `connectable` keyword argument which can reference an `Engine` or a `Connection`. 
If no engine is specified, the underlying bound `Engine`, if any, is used: +Entire groups of Tables can be created and dropped directly from the `MetaData` object with `create_all()` and `drop_all()`. These methods always check for the existence of each table before creating or dropping. Each method takes an optional `bind` keyword argument which can reference an `Engine` or a `Connection`. If no engine is specified, the underlying bound `Engine`, if any, is used: {python} engine = create_engine('sqlite:///:memory:') @@ -315,7 +279,7 @@ Entire groups of Tables can be created and dropped directly from the `MetaData` Column('pref_value', String(100)) ) - {sql}metadata.create_all(connectable=engine) + {sql}metadata.create_all(bind=engine) PRAGMA table_info(users){} CREATE TABLE users( user_id INTEGER NOT NULL PRIMARY KEY, @@ -397,7 +361,7 @@ A PassiveDefault indicates an column default that is executed upon INSERT by the {python} t = Table('test', meta, - Column('mycolumn', DateTime, PassiveDefault(text("sysdate"))) + Column('mycolumn', DateTime, PassiveDefault("sysdate")) ) A create call for the above table will produce: @@ -415,7 +379,7 @@ PassiveDefault also sends a message to the `Engine` that data is available after Column('my_id', Integer, primary_key=True), # an on-insert database-side default - Column('data1', Integer, PassiveDefault(text("d1_func()"))), + Column('data1', Integer, PassiveDefault("d1_func()")), ) # insert a row r = mytable.insert().execute(name='fred') @@ -444,9 +408,9 @@ A table with a sequence looks like: Column("createdate", DateTime()) ) -The Sequence is used with Postgres or Oracle to indicate the name of a database sequence that will be used to create default values for a column. When a table with a Sequence on a column is created in the database by SQLAlchemy, the database sequence object is also created. Similarly, the database sequence is dropped when the table is dropped. Sequences are typically used with primary key columns. When using Postgres, if an integer primary key column defines no explicit Sequence or other default method, SQLAlchemy will create the column with the SERIAL keyword, and will pre-execute a sequence named "tablename_columnname_seq" in order to retrieve new primary key values, if they were not otherwise explicitly stated. Oracle, which has no "auto-increment" keyword, requires that a Sequence be created for a table if automatic primary key generation is desired. +The Sequence is used with Postgres or Oracle to indicate the name of a database sequence that will be used to create default values for a column. When a table with a Sequence on a column is created in the database by SQLAlchemy, the database sequence object is also created. Similarly, the database sequence is dropped when the table is dropped. Sequences are typically used with primary key columns. When using Postgres, if an integer primary key column defines no explicit Sequence or other default method, SQLAlchemy will create the column with the SERIAL keyword, and will pre-execute a sequence named "tablename_columnname_seq" in order to retrieve new primary key values, if they were not otherwise explicitly stated. Oracle, which has no "auto-increment" keyword, requires that a Sequence be specified for a table if automatic primary key generation is desired. -A Sequence object can be defined on a Table that is then used for a non-sequence-supporting database. In that case, the Sequence object is simply ignored. 
Note that a Sequence object is **entirely optional for all databases except Oracle**, as other databases offer options for auto-creating primary key values, such as AUTOINCREMENT, SERIAL, etc. SQLAlchemy will use these default methods for creating primary key values if no Sequence is present on the table metadata. +A Sequence object can be defined on a Table that is then also used with a non-sequence-supporting database. In that case, the Sequence object is simply ignored. Note that a Sequence object is **entirely optional for all databases except Oracle**, as other databases offer options for auto-creating primary key values, such as AUTOINCREMENT, SERIAL, etc. SQLAlchemy will use these default methods for creating primary key values if no Sequence is present on the table metadata. A sequence can also be specified with `optional=True` which indicates the Sequence should only be used on a database that requires an explicit sequence, and not those that supply some other method of providing integer values. At the moment, it essentially means "use this sequence only with Oracle and not Postgres". @@ -527,7 +491,7 @@ The `Index` objects will be created along with the CREATE statements for the tab # define an index i = Index('someindex', sometable.c.col5) - # create the index, will use the table's connectable, or specify the connectable keyword argument + # create the index, will use the table's bound connectable if the `bind` keyword argument not specified i.create() ### Adapting Tables to Alternate Metadata {@name=adapting} diff --git a/doc/build/content/sqlconstruction.txt b/doc/build/content/sqlconstruction.txt index e8077f5bd1..e517581804 100644 --- a/doc/build/content/sqlconstruction.txt +++ b/doc/build/content/sqlconstruction.txt @@ -74,8 +74,7 @@ As mentioned above, `ClauseElement` structures can also be executed with a `Conn engine = create_engine('sqlite:///myfile.db') conn = engine.connect() - s = users.select() - {sql}result = conn.execute(s) + {sql}result = conn.execute(users.select()) SELECT users.user_id, users.user_name, users.password FROM users {} @@ -83,16 +82,21 @@ As mentioned above, `ClauseElement` structures can also be executed with a `Conn #### Binding ClauseElements to Engines {@name=binding} -For queries that don't contain any tables, `ClauseElement`s that represent a fully executeable statement support an `engine` keyword parameter which can bind the object to an `Engine`, thereby allowing implicit execution: +For queries that don't contain any "bound" tables, `ClauseElement`s that represent a fully executeable statement support an `bind` keyword parameter which can bind the object to an `Engine` or `Connection`, thereby allowing implicit execution: {python} + # select using a table + {sql}select([users], bind=myengine).execute() + SELECT users.user_id, users.user_name, users.password FROM users + {} + # select a literal - {sql}select(["current_time"], engine=myengine).execute() + {sql}select(["current_time"], bind=myengine).execute() SELECT current_time {} # select a function - {sql}select([func.now()], engine=db).execute() + {sql}select([func.now()], bind=db).execute() SELECT now() {} @@ -127,6 +131,11 @@ The object returned by `execute()` is a `sqlalchemy.engine.ResultProxy` object, # or get the underlying DBAPI cursor object cursor = result.cursor + # after an INSERT, return the last inserted primary key value + # returned as a list of primary key values for *one* row + # (a list since primary keys can be composite) + id = result.last_inserted_ids() + # close the 
result. If the statement was implicitly executed # (i.e. without an explicit Connection), this will # return the underlying connection resources back to @@ -366,10 +375,10 @@ Functions also are callable as standalone values: {python} # call the "now()" function - time = func.now(engine=myengine).scalar() + time = func.now(bind=myengine).scalar() # call myfunc(1,2,3) - myvalue = func.myfunc(1, 2, 3, engine=db).execute() + myvalue = func.myfunc(1, 2, 3, bind=db).execute() # or call them off the engine db.func.now().scalar() @@ -385,14 +394,14 @@ You can drop in a literal value anywhere there isnt a column to attach to via th {'literal_1': 'bar', 'literal': 'foo'} # literals have all the same comparison functions as columns - {sql}select([literal('foo') == literal('bar')], engine=myengine).scalar() + {sql}select([literal('foo') == literal('bar')], bind=myengine).scalar() SELECT :literal = :literal_1 {'literal_1': 'bar', 'literal': 'foo'} Literals also take an optional `type` parameter to give literals a type. This can sometimes be significant, for example when using the "+" operator with SQLite, the String type is detected and the operator is converted to "||": {python} - {sql}select([literal('foo', type=String) + 'bar'], engine=e).execute() + {sql}select([literal('foo', type=String) + 'bar'], bind=e).execute() SELECT ? || ? ['foo', 'bar'] @@ -712,7 +721,7 @@ Throughout all these examples, SQLAlchemy is busy creating bind parameters where #### Precompiling a Query {@name=precompiling} -By throwing the `compile()` method onto the end of any query object, the query can be "compiled" by the SQLEngine into a `sqlalchemy.sql.Compiled` object just once, and the resulting compiled object reused, which eliminates repeated internal compilation of the SQL string: +By throwing the `compile()` method onto the end of any query object, the query can be "compiled" by the Engine into a `sqlalchemy.sql.Compiled` object just once, and the resulting compiled object reused, which eliminates repeated internal compilation of the SQL string: {python}s = users.select(users.c.user_name==bindparam('username')).compile() s.execute(username='fred') @@ -721,7 +730,7 @@ By throwing the `compile()` method onto the end of any query object, the query c ### Literal Text Blocks {@name=textual} -The sql package tries to allow free textual placement in as many ways as possible. In the examples below, note that the from_obj parameter is used only when no other information exists within the select object with which to determine table metadata. Also note that in a query where there isnt even table metadata used, the SQLEngine to be used for the query has to be explicitly specified: +The sql package tries to allow free textual placement in as many ways as possible. In the examples below, note that the from_obj parameter is used only when no other information exists within the select object with which to determine table metadata. 
Also note that in a query where there isnt even table metadata used, the Engine to be used for the query has to be explicitly specified: {python} # strings as column clauses @@ -759,12 +768,12 @@ The sql package tries to allow free textual placement in as many ways as possibl {sql}select( ["*"], from_obj=["(select user_id, user_name from users)"], - engine=db).execute() + bind=db).execute() SELECT * FROM (select user_id, user_name from users) {} # a full query - {sql}text("select user_name from users", engine=db).execute() + {sql}text("select user_name from users", bind=db).execute() SELECT user_name FROM users {} @@ -773,22 +782,22 @@ The sql package tries to allow free textual placement in as many ways as possibl Use the format `':paramname'` to define bind parameters inside of a text block. They will be converted to the appropriate format upon compilation: - {python}t = engine.text("select foo from mytable where lala=:hoho") + {python}t = text("select foo from mytable where lala=:hoho", bind=engine) r = t.execute(hoho=7) Bind parameters can also be explicit, which allows typing information to be added. Just specify them as a list with keys that match those inside the textual statement: - {python}t = engine.text("select foo from mytable where lala=:hoho", - bindparams=[bindparam('hoho', type=types.String)]) + {python}t = text("select foo from mytable where lala=:hoho", + bindparams=[bindparam('hoho', type=types.String)], bind=engine) r = t.execute(hoho="im hoho") Result-row type processing can be added via the `typemap` argument, which is a dictionary of return columns mapped to types: {python}# specify DateTime type for the 'foo' column in the result set # sqlite, for example, uses result-row post-processing to construct dates - t = engine.text("select foo from mytable where lala=:hoho", + t = text("select foo from mytable where lala=:hoho", bindparams=[bindparam('hoho', type=types.String)], - typemap={'foo':types.DateTime} + typemap={'foo':types.DateTime}, bind=engine ) r = t.execute(hoho="im hoho") diff --git a/doc/build/content/tutorial.txt b/doc/build/content/tutorial.txt index 615f275f9a..1f01ce9a65 100644 --- a/doc/build/content/tutorial.txt +++ b/doc/build/content/tutorial.txt @@ -86,13 +86,13 @@ Configuring SQLAlchemy for your database consists of creating objects called `Ta {python} >>> metadata = MetaData() - >>> metadata.connect(db) + >>> metadata.bind = db -An equivalent operation is to create the `MetaData` object directly with an Engine URL, which calls the `create_engine` call for us: +An equivalent operation is to create the `MetaData` object directly with the Engine: {python} - >>> metadata = MetaData('sqlite:///tutorial.db') - + >>> metadata = MetaData(db) + Now, when we tell "metadata" about the tables in our database, we can issue CREATE statements for those tables, as well as execute SQL statements derived from them, without needing to open or close any connections; that will be all done automatically. Note that SQLALchemy fully supports the usage of explicit Connection objects for all SQL operations, which may be in conjunction with plain `MetaData` objects that are entirely unbound to any Engine, providing a more decoupled pattern that allows finer-grained control of connections than the "bound" approach this tutorial will present. For the purposes of this tutorial, we will stick with "bound" objects, as it allows us to focus more on SA's general concepts, leaving explicit connection management as a more advanced topic. 
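For reference, the bound-metadata pattern this tutorial hunk describes looks like the following when written out in full; the engine URL and the `users` table here are illustrative only and are not part of the patch:

    {python}
    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    engine = create_engine('sqlite:///tutorial.db')

    # bind at construction time, or assign metadata.bind = engine afterwards
    metadata = MetaData(engine)

    users = Table('users', metadata,
        Column('user_id', Integer, primary_key=True),
        Column('user_name', String(40)))

    # no explicit Connection handling is needed; the bound Engine is used
    metadata.create_all()                          # issues CREATE TABLE
    r = users.insert().execute(user_name='fred')   # implicit execution
    print r.last_inserted_ids()                    # primary key values of the new row
    rows = users.select().execute().fetchall()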
diff --git a/doc/build/content/unitofwork.txt b/doc/build/content/unitofwork.txt index 261799e41b..184ece0259 100644 --- a/doc/build/content/unitofwork.txt +++ b/doc/build/content/unitofwork.txt @@ -45,13 +45,13 @@ A common option used with `create_session()` is to specify a specific `Engine` o # create a Session that will use this engine for all operations. # it will open and close Connections as needed. - session = create_session(bind_to=e) + session = create_session(bind=e) # open a Connection conn = e.connect() # create a Session that will use this specific Connection for all operations - session = create_session(bind_to=conn) + session = create_session(bind=conn) The session to which an object is attached can be acquired via the `object_session()` function, which returns the appropriate `Session` if the object is pending or persistent, or `None` if the object is transient or detached: @@ -291,7 +291,7 @@ Both of these methods receive two arguments; in the case of `bind_mapper()`, it sess.bind_mapper(mymapper, sqlite_connection) # bind mymapper operations to a single SQLite connection sess.bind_table(email_addresses_table, engine2) # bind operations with the email_addresses_table to mysql -Normally, when a `Session` is created via `create_session()` with no arguments, the Session has no awareness of individual `Engines`, and when mappers use the `Session` to retrieve connections, the underlying `MetaData` each `Table` is associated with is expected to be "bound" to an `Engine`, else no engine can be located and an exception is raised. A second form of `create_session()` takes the argument `bind_to=engine_or_connection`, where all SQL operations performed by this `Session` use the single `Engine` or `Connection` (collectively known as a `Connectable`) passed to the constructor. With `bind_mapper()` and `bind_table()`, the operations of individual mapper and/or tables are bound to distinct engines or connections, thereby overriding not only the engine which may be "bound" to the underlying `MetaData`, but also the `Engine` or `Connection` which may have been passed to the `create_session()` function. Configurations which interact with multiple explicit database connections at one time must use either or both of these methods in order to associate `Session` operations with the appropriate connection resource. +Normally, when a `Session` is created via `create_session()` with no arguments, the Session has no awareness of individual `Engines`, and when mappers use the `Session` to retrieve connections, the underlying `MetaData` each `Table` is associated with is expected to be "bound" to an `Engine`, else no engine can be located and an exception is raised. A second form of `create_session()` takes the argument `bind=engine_or_connection`, where all SQL operations performed by this `Session` use the single `Engine` or `Connection` (collectively known as a `Connectable`) passed to the constructor. With `bind_mapper()` and `bind_table()`, the operations of individual mapper and/or tables are bound to distinct engines or connections, thereby overriding not only the engine which may be "bound" to the underlying `MetaData`, but also the `Engine` or `Connection` which may have been passed to the `create_session()` function. Configurations which interact with multiple explicit database connections at one time must use either or both of these methods in order to associate `Session` operations with the appropriate connection resource. 
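To make the two binding methods concrete, a condensed sketch follows; it assumes the `mymapper` and `email_addresses_table` names used in the surrounding examples, and the engine URLs are placeholders:

    {python}
    engine1 = create_engine('sqlite:///main.db')
    engine2 = create_engine('mysql://user:pw@localhost/mail')

    # a Session with no default bind; connection resources are routed
    # per-mapper and per-table instead
    sess = create_session()

    sess.bind_mapper(mymapper, engine1)              # mymapper operations use engine1
    sess.bind_table(email_addresses_table, engine2)  # this table's operations use engine2

    # loads and flushes involving mymapper now go to engine1
    results = sess.query(mymapper).select()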
Binding a `Mapper` to a resource takes precedence over a `Table` bind, meaning if mapper A is associated with table B, and the Session binds mapper A to connection X and table B to connection Y, an operation with mapper A will use connection X, not connection Y. @@ -389,7 +389,8 @@ Note that while SessionTransaction is capable of tracking multiple transactions SessionTransaction Facts: * SessionTransaction, like its parent Session object, is **not threadsafe**. - + * SessionTransaction will no longer be necessary in SQLAlchemy 0.4, where its functionality is to be merged with the Session itself. + #### Using SQL with SessionTransaction {@name=sql} The SessionTransaction can interact with direct SQL queries in two general ways. Either specific `Connection` objects can be associated with the `SessionTransaction`, which are then useable both for direct SQL as well as within `flush()` operations performed by the `SessionTransaction`, or via accessing the `Connection` object automatically referenced within the `SessionTransaction`. @@ -430,7 +431,7 @@ The transactions issued by `SessionTransaction` as well as internally by the `Se {python title="Transactions with Sessions"} connection = engine.connect() # Connection - session = create_session(bind_to=connection) # Session bound to the Connection + session = create_session(bind=connection) # Session bound to the Connection trans = connection.begin() # start transaction try: stuff = session.query(MyClass).select() # Session operation uses connection @@ -442,33 +443,3 @@ The transactions issued by `SessionTransaction` as well as internally by the `Se raise trans.commit() # commit -### Analyzing Object Flushes {@name=logging} - -The session module can log an extensive display of its "flush plans", which is a graph of its internal representation of objects before they are written to the database. To turn this logging on: - - {python} - # make an Session with echo_uow - session = create_session(echo_uow=True) - -The `flush()` operation will then dump to the standard output displays like the following: - - {code} - Task dump: - - UOWTask(6034768, 'User/users/None') - | - |- Save User(6016624) - | |-Process User(6016624).addresses - | - |- UOWTask(6034832, 'Address/email_addresses/None') - | |- Save Address(6034384) - | |- Save Address(6034256) - | |---- - | - |---- - -The above graph can be read straight downwards to determine the order of operations. It indicates "save User 6016624, process each element in the 'addresses' list on User 6016624, save Address 6034384, Address 6034256". - -Of course, one can also get a good idea of the order of operations just by logging the actual SQL statements executed. 
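A minimal way to get that SQL log, using the standard `echo` flag on `create_engine()` (engine-level echo is existing behavior, not something this patch changes), is:

    {python}
    engine = create_engine('sqlite:///mydb.db', echo=True)   # log all SQL statements
    session = create_session(bind=engine)
    # ... modify mapped objects ...
    session.flush()   # the flush's INSERT/UPDATE/DELETE statements appear in execution order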
- - diff --git a/lib/sqlalchemy/ansisql.py b/lib/sqlalchemy/ansisql.py index 090368a58e..a0f37e1707 100644 --- a/lib/sqlalchemy/ansisql.py +++ b/lib/sqlalchemy/ansisql.py @@ -840,8 +840,8 @@ class ANSISchemaGenerator(ANSISchemaBase): def get_column_default_string(self, column): if isinstance(column.default, schema.PassiveDefault): - if isinstance(column.default.arg, str): - return repr(column.default.arg) + if isinstance(column.default.arg, basestring): + return "'%s'" % column.default.arg else: return str(self._compile(column.default.arg, None)) else: diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 82ec73f598..99688a48fd 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -326,14 +326,11 @@ class ExecutionContext(object): """Return the list of the primary key values for the last insert statement executed. This does not apply to straight textual clauses; only to - ``sql.Insert`` objects compiled against a ``schema.Table`` object, - which are executed via `execute()`. The order of + ``sql.Insert`` objects compiled against a ``schema.Table`` object. + The order of items in the list is the same as that of the Table's 'primary_key' attribute. - In some cases, this method may invoke a query back to the - database to retrieve the data, based on the "lastrowid" value - in the cursor. """ raise NotImplementedError() @@ -865,6 +862,7 @@ class ResultProxy(object): rowcount = property(lambda s:s.context.get_rowcount()) connection = property(lambda s:s.context.connection) + lastrowid = property(lambda s:s.cursor.lastrowid) def _init_metadata(self): if hasattr(self, '_ResultProxy__props'): diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index dd73c2aeec..9d31df8c0d 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -11,7 +11,12 @@ from sqlalchemy.orm.interfaces import OperationContext, SynonymProperty __all__ = ['Query', 'QueryContext', 'SelectionContext'] class Query(object): - """Encapsulates the object-fetching operations provided by Mappers.""" + """Encapsulates the object-fetching operations provided by Mappers. + + Note that this particular version of Query contains the 0.3 API as well as most of the + 0.4 API for forwards compatibility. A large part of the API here is deprecated (but still present) + in the 0.4 series. + """ def __init__(self, class_or_mapper, session=None, entity_name=None, lockmode=None, with_options=None, extension=None, **kwargs): if isinstance(class_or_mapper, type): @@ -43,6 +48,7 @@ class Query(object): self._offset = kwargs.pop('offset', None) self._limit = kwargs.pop('limit', None) self._criterion = None + self._params = {} self._col = None self._func = None self._joinpoint = self.mapper @@ -52,6 +58,8 @@ class Query(object): opt.process_query(self) def _clone(self): + # yes, a little embarassing here. + # go look at 0.4 for the simple version. q = Query.__new__(Query) q.mapper = self.mapper q.select_mapper = self.select_mapper @@ -68,6 +76,7 @@ class Query(object): q.extension.append(ext) q._offset = self._offset q._limit = self._limit + q._params = self._params q._group_by = self._group_by q._get_clause = self._get_clause q._from_obj = list(self._from_obj) @@ -129,6 +138,8 @@ class Query(object): The criterion is constructed in the same way as the ``select_by()`` method. + + this method is deprecated in 0.4. 
""" ret = self.extension.get_by(self, *args, **params) @@ -167,6 +178,7 @@ class Query(object): result = usermapper.select_by(user_name = 'fred') + this method is deprecated in 0.4. """ ret = self.extension.select_by(self, *args, **params) @@ -181,6 +193,8 @@ class Query(object): The criterion is constructed in the same way as the ``select_by()`` method. + + this method is deprecated in 0.4. """ return self._join_by(args, params) @@ -192,6 +206,8 @@ class Query(object): locate the property, and will return a ClauseElement representing a join from this Query's mapper to the endmost mapper. + + this method is deprecated in 0.4. """ [keys, p] = self._locate_prop(key) @@ -203,6 +219,8 @@ class Query(object): from one mapper to the next, return a ClauseElement representing a join from this Query's mapper to the endmost mapper. + + this method is deprecated in 0.4. """ mapper = self.mapper @@ -224,6 +242,8 @@ class Query(object): The criterion is constructed in the same way as the ``select_by()`` method. + + this method is deprecated in 0.4. """ return self.get_by(*args, **params) @@ -234,6 +254,8 @@ class Query(object): The criterion is constructed in the same way as the ``select_by()`` method. + + this method is deprecated in 0.4. """ ret = self.select_whereclause(self.join_by(*args, **params), limit=2) @@ -250,6 +272,8 @@ class Query(object): The criterion is constructed in the same way as the ``select_by()`` method. + + this method is deprecated in 0.4. """ return self.count(self.join_by(*args, **params)) @@ -261,6 +285,7 @@ class Query(object): the given criterion represents ``WHERE`` criterion only, LIMIT 1 is applied to the fully generated statement. + this method is deprecated in 0.4. """ if isinstance(arg, sql.FromClause) and arg.supports_execution(): @@ -281,6 +306,7 @@ class Query(object): ``WHERE`` criterion only, LIMIT 2 is applied to the fully generated statement. + this method is deprecated in 0.4. """ if isinstance(arg, sql.FromClause) and arg.supports_execution(): @@ -308,6 +334,8 @@ class Query(object): In this case, the developer must ensure that an adequate set of columns exists in the rowset with which to build new object instances. + + this method is deprecated in 0.4. """ ret = self.extension.select(self, arg=arg, **kwargs) @@ -321,6 +349,8 @@ class Query(object): def select_whereclause(self, whereclause=None, params=None, **kwargs): """Given a ``WHERE`` criterion, create a ``SELECT`` statement, execute and return the resulting instances. + + this method is deprecated in 0.4. """ statement = self.compile(whereclause, **kwargs) return self._select_statement(statement, params=params) @@ -328,6 +358,9 @@ class Query(object): def count(self, whereclause=None, params=None, **kwargs): """Given a ``WHERE`` criterion, create a ``SELECT COUNT`` statement, execute and return the resulting count value. + + the additional arguments to this method are is deprecated in 0.4. + """ if self._criterion: if whereclause is not None: @@ -353,6 +386,8 @@ class Query(object): def select_statement(self, statement, **params): """Given a ``ClauseElement``-based statement, execute and return the resulting instances. + + this method is deprecated in 0.4. """ return self._select_statement(statement, params=params) @@ -360,6 +395,8 @@ class Query(object): def select_text(self, text, **params): """Given a literal string-based statement, execute and return the resulting instances. + + this method is deprecated in 0.4. use from_statement() instead. 
""" t = sql.text(text) @@ -501,12 +538,27 @@ class Query(object): q = self._clone() q.lockmode = mode return q + + def params(self, **kwargs): + """add values for bind parameters which may have been specified in filter().""" + + q = self._clone() + q._params = q._params.copy() + q._params.update(kwargs) + return q def filter(self, criterion): """apply the given filtering criterion to the query and return the newly resulting ``Query`` the criterion is any sql.ClauseElement applicable to the WHERE clause of a select. """ + + if isinstance(criterion, basestring): + criterion = sql.text(criterion) + + if criterion is not None and not isinstance(criterion, sql.ClauseElement): + raise exceptions.ArgumentError("filter() argument must be of type sqlalchemy.sql.ClauseElement or string") + q = self._clone() if q._criterion is not None: q._criterion = q._criterion & criterion @@ -827,6 +879,8 @@ class Query(object): """Return the results represented by this ``Query`` as a list. This results in an execution of the underlying query. + + this method is deprecated in 0.4. use all() instead. """ return list(self) @@ -869,8 +923,23 @@ class Query(object): return self._col_aggregate(self._col, self._func) def all(self): + """Return the results represented by this ``Query`` as a list. + + This results in an execution of the underlying query. + """ return self.list() + def from_statement(self, statement): + """execute a full select() statement, or literal textual string as a SELECT statement. + + this method is for forwards compatibility with 0.4. + """ + if isinstance(statement, basestring): + statement = sql.text(statement) + q = self._clone() + q._statement = statement + return q + def scalar(self): """Return the first result of this ``Query``. @@ -890,13 +959,13 @@ class Query(object): this Query's session/mapper, return the resulting list of instances. - After execution, close the ResultProxy and its underlying - resources. This method is one step above the ``instances()`` - method, which takes the executed statement's ResultProxy - directly. + this method is deprecated in 0.4. Use from_statement() instead. """ - result = self.session.execute(self.mapper, clauseelement, params=params) + p = self._params + if params is not None: + p.update(params) + result = self.session.execute(self.mapper, clauseelement, params=p) try: return self.instances(result, **kwargs) finally: @@ -994,8 +1063,6 @@ class Query(object): def _select_statement(self, statement, params=None, **kwargs): statement.use_labels = True - if params is None: - params = {} return self.execute(statement, params=params, **kwargs) def _should_nest(self, querycontext): @@ -1018,6 +1085,8 @@ class Query(object): def compile(self, whereclause = None, **kwargs): """Given a WHERE criterion, produce a ClauseElement-based statement suitable for usage in the execute() method. + + the arguments to this function are deprecated and are removed in version 0.4. 
""" if self._criterion: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 053b6cea9e..9a0438fc93 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -274,15 +274,21 @@ class Session(object): raise exceptions.InvalidRequestError("Could not locate any Engine or Connection bound to mapper '%s'" % str(mapper)) return e - def query(self, mapper_or_class, entity_name=None, **kwargs): + def query(self, mapper_or_class, *addtl_entities, **kwargs): """Return a new ``Query`` object corresponding to this ``Session`` and the mapper, or the classes' primary mapper. """ - + + entity_name = kwargs.pop('entity_name', None) + if isinstance(mapper_or_class, type): - return query.Query(_class_mapper(mapper_or_class, entity_name=entity_name), self, **kwargs) + q = query.Query(_class_mapper(mapper_or_class, entity_name=entity_name), self, **kwargs) else: - return query.Query(mapper_or_class, self, **kwargs) + q = query.Query(mapper_or_class, self, **kwargs) + + for ent in addtl_entities: + q = q.add_entity(ent) + return q def _sql(self): class SQLProxy(object): diff --git a/test/orm/query.py b/test/orm/query.py index a3d6787dd8..7e3a515c8b 100644 --- a/test/orm/query.py +++ b/test/orm/query.py @@ -114,9 +114,36 @@ class FilterTest(QueryTest): def test_basic(self): assert [User(id=7), User(id=8), User(id=9),User(id=10)] == create_session().query(User).all() + def test_limit(self): + assert [User(id=8), User(id=9)] == create_session().query(User).limit(2).offset(1).all() + + assert [User(id=8), User(id=9)] == list(create_session().query(User)[1:3]) + + assert User(id=8) == create_session().query(User)[1] + def test_onefilter(self): assert [User(id=8), User(id=9)] == create_session().query(User).filter(users.c.name.endswith('ed')).all() +class CountTest(QueryTest): + def test_basic(self): + assert 4 == create_session().query(User).count() + + assert 2 == create_session().query(User).filter(users.c.name.endswith('ed')).count() + +class TextTest(QueryTest): + def test_fulltext(self): + assert [User(id=7), User(id=8), User(id=9),User(id=10)] == create_session().query(User).from_statement("select * from users").all() + + def test_fragment(self): + assert [User(id=8), User(id=9)] == create_session().query(User).filter("id in (8, 9)").all() + + assert [User(id=9)] == create_session().query(User).filter("name='fred'").filter("id=9").all() + + assert [User(id=9)] == create_session().query(User).filter("name='fred'").filter(users.c.id==9).all() + + def test_binds(self): + assert [User(id=8), User(id=9)] == create_session().query(User).filter("id in (:id1, :id2)").params(id1=8, id2=9).all() + class ParentTest(QueryTest): def test_o2m(self): @@ -305,6 +332,9 @@ class InstancesTest(QueryTest): q = sess.query(User).add_entity(Address) l = q.join('addresses').filter_by(email_address='ed@bettyboop.com').all() assert l == [(user8, address3)] + + q = sess.query(User, Address).join('addresses').filter_by(email_address='ed@bettyboop.com') + assert q.all() == [(user8, address3)] def test_multi_columns(self): sess = create_session() -- 2.47.2