]> git.ipfire.org Git - thirdparty/sqlalchemy/sqlalchemy.git/commitdiff
merged -r5841:5869 of trunk, including a local information_schema.py for MSSQL
authorMike Bayer <mike_mp@zzzcomputing.com>
Sun, 29 Mar 2009 21:40:01 +0000 (21:40 +0000)
committerMike Bayer <mike_mp@zzzcomputing.com>
Sun, 29 Mar 2009 21:40:01 +0000 (21:40 +0000)
22 files changed:
CHANGES
README.unittests
doc/build/Makefile
doc/build/mappers.rst
doc/build/ormtutorial.rst
doc/build/reference/orm/collections.rst [new file with mode: 0644]
doc/build/reference/orm/index.rst
lib/sqlalchemy/dialects/mssql/base.py
lib/sqlalchemy/dialects/mssql/information_schema.py [new file with mode: 0644]
lib/sqlalchemy/dialects/sqlite/base.py
lib/sqlalchemy/orm/dynamic.py
lib/sqlalchemy/orm/session.py
lib/sqlalchemy/orm/util.py
lib/sqlalchemy/schema.py
lib/sqlalchemy/sql/expression.py
test/ext/declarative.py
test/orm/dynamic.py
test/orm/lazy_relations.py
test/orm/query.py
test/profiling/alltests.py
test/sql/select.py
test/testlib/config.py

diff --git a/CHANGES b/CHANGES
index 503031d4118f0050470ac5e602679a0a7ab7bf1f..8aa1709c6e8b9215def8955ebc512fa50f541f1f 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -3,6 +3,34 @@
 =======
 CHANGES
 =======
+0.5.4
+=====
+- orm
+    - Fixed the "set collection" function on "dynamic" relations
+      to initiate events correctly.  Previously a collection
+      could only be assigned to a pending parent instance,
+      otherwise modified events would not be fired correctly.
+      Set collection is now compatible with merge(), 
+      fixes [ticket:1352].
+
+    - Lazy loader will not use get() if the "lazy load"
+      SQL clause matches the clause used by get(), but
+      contains some hardcoded parameters.  Previously
+      the lazy strategy would fail when falling back to
+      get().  Ideally get() would be used with the
+      hardcoded parameters, but this would require
+      further development.  [ticket:1357]
+
+- sql
+    - Fixed __repr__() and other _get_colspec() methods on 
+      ForeignKey constructed from __clause_element__() style
+      construct (i.e. declarative columns).  [ticket:1353]
+      
+- mssql
+    - Corrected problem with information schema not working with a
+      binary collation based database. Cleaned up information
+      schema since it is only used by mssql now. [ticket:1343]
+
 0.5.3
 =====
 - orm
@@ -24,6 +52,10 @@ CHANGES
       might say SELECT A.*, B.* FROM A JOIN X, B JOIN Y.  
       Eager loading can also tack its joins onto those 
       multiple FROM clauses.  [ticket:1337]
+
+    - Fixed bug in dynamic_loader() where append/remove events
+      after construction time were not being propagated to the 
+      UOW to pick up on flush(). [ticket:1347]
       
     - Fixed bug where column_prefix wasn't being checked before
       not mapping an attribute that already had class-level 
@@ -93,6 +125,11 @@ CHANGES
        with_polymorphic(), or using from_self().
         
 - sql
+    - An alias() of a select() will convert to a "scalar subquery"
+      when used in an unambiguously scalar context, i.e. it's used 
+      in a comparison operation.  This applies to
+      the ORM when using query.subquery() as well.
+      
     - Fixed missing _label attribute on Function object, others
       when used in a select() with use_labels (such as when used
       in an ORM column_property()).  [ticket:1302]
@@ -112,6 +149,13 @@ CHANGES
       or dialects.   There is a small performance penalty
       which will be resolved in 0.6.  [ticket:1299]
 
+- sqlite
+    - Fixed SQLite reflection methods so that non-present
+      cursor.description, which triggers an auto-cursor
+      close, will be detected so that no results doesn't
+      fail on recent versions of pysqlite which raise 
+      an error when fetchone() called with no rows present.
+      
 - postgres
     - Index reflection won't fail when an index with 
       multiple expressions is encountered.
index bb1ac3253bff8c0ffa1065fc75374c3b318dc414..67f5d7133f98c0a6d1c8fa9ca06037d103979bd4 100644 (file)
@@ -141,6 +141,9 @@ statements that are missed with !, by running the coverage.py utility with the
 This will create a new annotated file ./lib/sqlalchemy/sql.py,cover. Pretty
 cool!
 
+BIG COVERAGE TIP !!!  There is an issue where existing .pyc files may
+store the incorrect filepaths, which will break the coverage system.  If
+coverage numbers are coming out as low/zero, try deleting all .pyc files.
 
 TESTING NEW DIALECTS
 --------------------
index f7ac2ca576dd349387c7f3a072660e9a8358a546..6dcd0321ee953ac3f58c0fea78b8ad1d1c8368e2 100644 (file)
@@ -3,7 +3,7 @@
 
 # You can set these variables from the command line.
 SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
+SPHINXBUILD   = ./bin/sphinx-build
 PAPER         =
 
 # Internal variables.
index 89db6a0a740e79b6aaf914aa9e031c81f98ec7a7..ff9b1f95bf327c19dbabe648d498bb5daabf20df 100644 (file)
@@ -1285,6 +1285,8 @@ There's no restriction on how many times you can relate from parent to child.  SQ
                     addresses_table.c.city=='New York')),
     })
 
+.. _alternate_collection_implementations:
+
 Alternate Collection Implementations 
 -------------------------------------
 
index 28fdc1ec079d3b58f1f76a28241a59c5d503d2f8..1190f3c3365ed64c4b313618e447d06f8958e99c 100644 (file)
@@ -45,7 +45,7 @@ Next, we can issue CREATE TABLE statements derived from our table metadata, by c
 
     {sql}>>> metadata.create_all(engine) # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
     PRAGMA table_info("users")
-    {}
+    ()
     CREATE TABLE users (
         id INTEGER NOT NULL,
         name VARCHAR,
@@ -53,7 +53,7 @@ Next, we can issue CREATE TABLE statements derived from our table metadata, by c
         password VARCHAR,
         PRIMARY KEY (id)
     )
-    {}
+    ()
     COMMIT
 
 Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite, this is a valid datatype, but on most databases it's not allowed.  So if running this tutorial on a database such as Postgres or MySQL, and you wish to use SQLAlchemy to generate the tables, a "length" may be provided to the ``String`` type as below::
@@ -276,7 +276,7 @@ Querying the session, we can see that they're flushed into the current transacti
 
 .. sourcecode:: python+sql
 
-    {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
+    {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE
     UPDATE users SET name=? WHERE users.id = ?
     ['Edwardo', 1]
     INSERT INTO users (name, fullname, password) VALUES (?, ?, ?)
@@ -295,7 +295,7 @@ Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and ``fake_u
     ROLLBACK
     {stop}
 
-    {sql}>>> ed_user.name
+    {sql}>>> ed_user.name #doctest: +NORMALIZE_WHITESPACE
     BEGIN
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
@@ -309,7 +309,7 @@ issuing a SELECT illustrates the changes made to the database:
 
 .. sourcecode:: python+sql
 
-    {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all()
+    {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
     WHERE users.name IN (?, ?)
@@ -352,7 +352,7 @@ The tuples returned by ``Query`` are *named* tuples, and can be treated much lik
 
 .. sourcecode:: python+sql
 
-    {sql}>>> for row in session.query(User, User.name).all():
+    {sql}>>> for row in session.query(User, User.name).all(): #doctest: +NORMALIZE_WHITESPACE
     ...    print row.User, row.name
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
@@ -368,7 +368,7 @@ You can control the names using the ``label()`` construct for scalar attributes
 
     >>> from sqlalchemy.orm import aliased
     >>> user_alias = aliased(User, name='user_alias')
-    {sql}>>> for row in session.query(user_alias, user_alias.name.label('name_label')).all():
+    {sql}>>> for row in session.query(user_alias, user_alias.name.label('name_label')).all(): #doctest: +NORMALIZE_WHITESPACE
     ...    print row.user_alias, row.name_label
     SELECT users_1.id AS users_1_id, users_1.name AS users_1_name, users_1.fullname AS users_1_fullname, users_1.password AS users_1_password, users_1.name AS name_label
     FROM users AS users_1
@@ -478,7 +478,7 @@ The ``all()``, ``one()``, and ``first()`` methods of ``Query`` immediately issue
 .. sourcecode:: python+sql
 
     >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id)
-    {sql}>>> query.all()
+    {sql}>>> query.all() #doctest: +NORMALIZE_WHITESPACE
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
     WHERE users.name LIKE ? ORDER BY users.id
@@ -489,7 +489,7 @@ The ``all()``, ``one()``, and ``first()`` methods of ``Query`` immediately issue
 
 .. sourcecode:: python+sql
 
-    {sql}>>> query.first()
+    {sql}>>> query.first() #doctest: +NORMALIZE_WHITESPACE
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
     WHERE users.name LIKE ? ORDER BY users.id
@@ -501,7 +501,7 @@ The ``all()``, ``one()``, and ``first()`` methods of ``Query`` immediately issue
 
 .. sourcecode:: python+sql
 
-    {sql}>>> try:
+    {sql}>>> try: #doctest: +NORMALIZE_WHITESPACE
     ...     user = query.one()
     ... except Exception, e:
     ...     print e
@@ -514,7 +514,7 @@ The ``all()``, ``one()``, and ``first()`` methods of ``Query`` immediately issue
 
 .. sourcecode:: python+sql
 
-    {sql}>>> try:
+    {sql}>>> try: #doctest: +NORMALIZE_WHITESPACE
     ...     user = query.filter(User.id == 99).one()
     ... except Exception, e:
     ...     print e
@@ -532,7 +532,7 @@ Literal strings can be used flexibly with ``Query``.  Most methods accept string
 
 .. sourcecode:: python+sql
 
-    {sql}>>> for user in session.query(User).filter("id<224").order_by("id").all():
+    {sql}>>> for user in session.query(User).filter("id<224").order_by("id").all(): #doctest: +NORMALIZE_WHITESPACE
     ...     print user.name
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
@@ -616,9 +616,9 @@ We'll need to create the ``addresses`` table in the database, so we will issue a
 
     {sql}>>> metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE
     PRAGMA table_info("users")
-    {}
+    ()
     PRAGMA table_info("addresses")
-    {}
+    ()
     CREATE TABLE addresses (
         id INTEGER NOT NULL,
         email_address VARCHAR NOT NULL,
@@ -626,7 +626,7 @@ We'll need to create the ``addresses`` table in the database, so we will issue a
         PRIMARY KEY (id),
          FOREIGN KEY(user_id) REFERENCES users (id)
     )
-    {}
+    ()
     COMMIT
 
 Working with Related Objects
@@ -674,7 +674,7 @@ Querying for Jack, we get just Jack back.  No SQL is yet issued for Jack's addre
 
 .. sourcecode:: python+sql
 
-    {sql}>>> jack = session.query(User).filter_by(name='jack').one()
+    {sql}>>> jack = session.query(User).filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE
     BEGIN
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
@@ -689,7 +689,7 @@ Let's look at the ``addresses`` collection.  Watch the SQL:
 
 .. sourcecode:: python+sql
 
-    {sql}>>> jack.addresses
+    {sql}>>> jack.addresses #doctest: +NORMALIZE_WHITESPACE
     SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id
     FROM addresses
     WHERE ? = addresses.user_id ORDER BY addresses.id
@@ -748,7 +748,7 @@ Or we can make a real JOIN construct; one way to do so is to use the ORM ``join(
 
     >>> from sqlalchemy.orm import join
     {sql}>>> session.query(User).select_from(join(User, Address)).\
-    ...         filter(Address.email_address=='jack@google.com').all()
+    ...         filter(Address.email_address=='jack@google.com').all() #doctest: +NORMALIZE_WHITESPACE
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users JOIN addresses ON users.id = addresses.user_id
     WHERE addresses.email_address = ?
@@ -768,7 +768,7 @@ The functionality of ``join()`` is also available generatively from ``Query`` it
 .. sourcecode:: python+sql
 
     {sql}>>> session.query(User).join(User.addresses).\
-    ...     filter(Address.email_address=='jack@google.com').all()
+    ...     filter(Address.email_address=='jack@google.com').all() #doctest: +NORMALIZE_WHITESPACE
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users JOIN addresses ON users.id = addresses.user_id
     WHERE addresses.email_address = ?
@@ -849,6 +849,26 @@ Once we have our statement, it behaves like a ``Table`` construct, such as the o
     <User('fred','Fred Flinstone', 'blah')> None
     <User('jack','Jack Bean', 'gjffdd')> 2
 
+Selecting Entities from Subqueries
+----------------------------------
+
+Above, we just selected a result that included a column from a subquery.  What if we wanted our subquery to map to an entity?  For this we use ``aliased()`` to associate an "alias" of a mapped class to a subquery:
+
+.. sourcecode:: python+sql
+
+    {sql}>>> stmt = session.query(Address).filter(Address.email_address != 'j25@yahoo.com').subquery()
+    >>> adalias = aliased(Address, stmt)
+    >>> for user, address in session.query(User, adalias).join((adalias, User.addresses)): # doctest: +NORMALIZE_WHITESPACE
+    ...     print user, address
+    SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, 
+    users.password AS users_password, anon_1.id AS anon_1_id, 
+    anon_1.email_address AS anon_1_email_address, anon_1.user_id AS anon_1_user_id 
+    FROM users JOIN (SELECT addresses.id AS id, addresses.email_address AS email_address, addresses.user_id AS user_id 
+    FROM addresses 
+    WHERE addresses.email_address != ?) AS anon_1 ON users.id = anon_1.user_id
+    ['j25@yahoo.com']
+    {stop}<User('jack','Jack Bean', 'gjffdd')> <Address('jack@google.com')>
+
 Using EXISTS
 ------------
 
@@ -1025,7 +1045,7 @@ Now when we load Jack (below using ``get()``, which loads by primary key), remov
     {stop}
 
     # remove one Address (lazy load fires off)
-    {sql}>>> del jack.addresses[1]
+    {sql}>>> del jack.addresses[1] #doctest: +NORMALIZE_WHITESPACE
     SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id
     FROM addresses
     WHERE ? = addresses.user_id
@@ -1133,15 +1153,15 @@ Create new tables:
 
     {sql}>>> metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE
     PRAGMA table_info("users")
-    {}
+    ()
     PRAGMA table_info("addresses")
-    {}
+    ()
     PRAGMA table_info("posts")
-    {}
+    ()
     PRAGMA table_info("keywords")
-    {}
+    ()
     PRAGMA table_info("post_keywords")
-    {}
+    ()
     CREATE TABLE posts (
         id INTEGER NOT NULL,
         user_id INTEGER,
@@ -1150,7 +1170,7 @@ Create new tables:
         PRIMARY KEY (id),
          FOREIGN KEY(user_id) REFERENCES users (id)
     )
-    {}
+    ()
     COMMIT
     CREATE TABLE keywords (
         id INTEGER NOT NULL,
@@ -1158,7 +1178,7 @@ Create new tables:
         PRIMARY KEY (id),
          UNIQUE (keyword)
     )
-    {}
+    ()
     COMMIT
     CREATE TABLE post_keywords (
         post_id INTEGER,
@@ -1166,14 +1186,14 @@ Create new tables:
          FOREIGN KEY(post_id) REFERENCES posts (id),
          FOREIGN KEY(keyword_id) REFERENCES keywords (id)
     )
-    {}
+    ()
     COMMIT
 
 Usage is not too different from what we've been doing.  Let's give Wendy some blog posts:
 
 .. sourcecode:: python+sql
 
-    {sql}>>> wendy = session.query(User).filter_by(name='wendy').one()
+    {sql}>>> wendy = session.query(User).filter_by(name='wendy').one() #doctest: +NORMALIZE_WHITESPACE
     SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password
     FROM users
     WHERE users.name = ?
@@ -1194,19 +1214,19 @@ We can now look up all blog posts with the keyword 'firstpost'.   We'll use the
 
 .. sourcecode:: python+sql
 
-    {sql}>>> session.query(BlogPost).filter(BlogPost.keywords.any(keyword='firstpost')).all()
-    INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?)
-    [2, "Wendy's Blog Post", 'This is a test']
+    {sql}>>> session.query(BlogPost).filter(BlogPost.keywords.any(keyword='firstpost')).all() #doctest: +NORMALIZE_WHITESPACE
     INSERT INTO keywords (keyword) VALUES (?)
     ['wendy']
     INSERT INTO keywords (keyword) VALUES (?)
     ['firstpost']
+    INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?)
+    [2, "Wendy's Blog Post", 'This is a test']
     INSERT INTO post_keywords (post_id, keyword_id) VALUES (?, ?)
-    [[1, 1], [1, 2]]
-    SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body
-    FROM posts
-    WHERE EXISTS (SELECT 1
-    FROM post_keywords, keywords
+    [[1, 2], [1, 1]]
+    SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body 
+    FROM posts 
+    WHERE EXISTS (SELECT 1 
+    FROM post_keywords, keywords 
     WHERE posts.id = post_keywords.post_id AND keywords.id = post_keywords.keyword_id AND keywords.keyword = ?)
     ['firstpost']
     {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]
@@ -1216,7 +1236,7 @@ If we want to look up just Wendy's posts, we can tell the query to narrow down t
 .. sourcecode:: python+sql
 
     {sql}>>> session.query(BlogPost).filter(BlogPost.author==wendy).\
-    ... filter(BlogPost.keywords.any(keyword='firstpost')).all()
+    ... filter(BlogPost.keywords.any(keyword='firstpost')).all() #doctest: +NORMALIZE_WHITESPACE
     SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body
     FROM posts
     WHERE ? = posts.user_id AND (EXISTS (SELECT 1
@@ -1229,7 +1249,7 @@ Or we can use Wendy's own ``posts`` relation, which is a "dynamic" relation, to
 
 .. sourcecode:: python+sql
 
-    {sql}>>> wendy.posts.filter(BlogPost.keywords.any(keyword='firstpost')).all()
+    {sql}>>> wendy.posts.filter(BlogPost.keywords.any(keyword='firstpost')).all() #doctest: +NORMALIZE_WHITESPACE
     SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body
     FROM posts
     WHERE ? = posts.user_id AND (EXISTS (SELECT 1
diff --git a/doc/build/reference/orm/collections.rst b/doc/build/reference/orm/collections.rst
new file mode 100644 (file)
index 0000000..e8cf678
--- /dev/null
@@ -0,0 +1,17 @@
+Collection Mapping
+==================
+
+This is an in-depth discussion of collection mechanics.  For simple examples, see :ref:`alternate_collection_implementations`.
+
+.. automodule:: sqlalchemy.orm.collections
+
+.. autofunction:: attribute_mapped_collection
+
+.. autoclass:: collection
+
+.. autofunction:: collection_adapter
+
+.. autofunction:: column_mapped_collection
+
+.. autofunction:: mapped_collection
+
index c7c771d8cd79d4b44b95279c10f40f094a9e52ae..001d7b4eebd7d12c3047b712e159da3fa0d3d3c0 100644 (file)
@@ -7,6 +7,7 @@ sqlalchemy.orm
     :glob:
 
     mapping
+    collections
     query
     sessions
     interfaces
index cbb79662da9ea52222c058b745be86e081a4eb34..13541524a575594a035bd45629c701ce3427a404 100644 (file)
@@ -235,6 +235,8 @@ from sqlalchemy.engine import default, base, reflection
 from sqlalchemy import types as sqltypes
 from decimal import Decimal as _python_Decimal
 
+import information_schema as ischema
+
 MS_2008_VERSION = (10,)
 #MS_2005_VERSION = ??
 #MS_2000_VERSION = ??
@@ -1110,22 +1112,9 @@ class MSDialect(default.DefaultDialect):
         return self.schema_name
 
     def table_names(self, connection, schema):
-        from sqlalchemy.dialects import information_schema as ischema
         return ischema.table_names(connection, schema)
 
-    def uppercase_table(self, t):
-        # convert all names to uppercase -- fixes refs to INFORMATION_SCHEMA for case-senstive DBs, and won't matter for case-insensitive
-        t.name = t.name.upper()
-        if t.schema:
-            t.schema = t.schema.upper()
-        for c in t.columns:
-            c.name = c.name.upper()
-        return t
-
-
     def has_table(self, connection, tablename, schema=None):
-        import sqlalchemy.dialects.information_schema as ischema
-
         current_schema = schema or self.get_default_schema_name(connection)
         columns = self.uppercase_table(ischema.columns)
         s = sql.select([columns],
@@ -1140,7 +1129,6 @@ class MSDialect(default.DefaultDialect):
 
     @reflection.cache
     def get_schema_names(self, connection, info_cache=None):
-        import sqlalchemy.dialects.information_schema as ischema
         s = sql.select([self.uppercase_table(ischema.schemata).c.schema_name],
             order_by=[ischema.schemata.c.schema_name]
         )
@@ -1149,7 +1137,6 @@ class MSDialect(default.DefaultDialect):
 
     @reflection.cache
     def get_table_names(self, connection, schemaname, info_cache=None):
-        import sqlalchemy.dialects.information_schema as ischema
         current_schema = schemaname or self.get_default_schema_name(connection)
         tables = self.uppercase_table(ischema.tables)
         s = sql.select([tables.c.table_name],
@@ -1164,7 +1151,6 @@ class MSDialect(default.DefaultDialect):
 
     @reflection.cache
     def get_view_names(self, connection, schemaname=None, info_cache=None):
-        import sqlalchemy.dialects.information_schema as ischema
         current_schema = schemaname or self.get_default_schema_name(connection)
         tables = self.uppercase_table(ischema.tables)
         s = sql.select([tables.c.table_name],
@@ -1197,7 +1183,6 @@ class MSDialect(default.DefaultDialect):
     @reflection.cache
     def get_view_definition(self, connection, viewname, schemaname=None,
                             info_cache=None):
-        import sqlalchemy.dialects.information_schema as ischema
         current_schema = schemaname or self.get_default_schema_name(connection)
         views = self.uppercase_table(ischema.views)
         s = sql.select([views.c.view_definition],
@@ -1216,7 +1201,6 @@ class MSDialect(default.DefaultDialect):
                                                             info_cache=None):
         # Get base columns
         current_schema = schemaname or self.get_default_schema_name(connection)
-        import sqlalchemy.dialects.information_schema as ischema
         columns = self.uppercase_table(ischema.columns)
         s = sql.select([columns],
                    current_schema
@@ -1275,7 +1259,6 @@ class MSDialect(default.DefaultDialect):
     @reflection.cache
     def get_primary_keys(self, connection, tablename, schemaname=None,
                                                             info_cache=None):
-        import sqlalchemy.dialects.information_schema as ischema
         current_schema = schemaname or self.get_default_schema_name(connection)
         pkeys = []
         # Add constraints
@@ -1299,7 +1282,6 @@ class MSDialect(default.DefaultDialect):
     @reflection.cache
     def get_foreign_keys(self, connection, tablename, schemaname=None,
                                                             info_cache=None):
-        import sqlalchemy.dialects.information_schema as ischema
         current_schema = schemaname or self.get_default_schema_name(connection)
         # Add constraints
         RR = self.uppercase_table(ischema.ref_constraints)    #information_schema.referential_constraints
@@ -1350,7 +1332,6 @@ class MSDialect(default.DefaultDialect):
         return fkeys
 
     def reflecttable(self, connection, table, include_columns):
-        import sqlalchemy.dialects.information_schema as ischema
         # Get base columns
         if table.schema is not None:
             current_schema = table.schema
diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py
new file mode 100644 (file)
index 0000000..447c7b5
--- /dev/null
@@ -0,0 +1,65 @@
+from sqlalchemy import Table, MetaData, Column, ForeignKey, String, Integer
+
+ischema = MetaData()
+schemata = Table("SCHEMATA", ischema,
+    Column("CATALOG_NAME", String, key="catalog_name"),
+    Column("SCHEMA_NAME", String, key="schema_name"),
+    Column("SCHEMA_OWNER", String, key="schema_owner"),
+    schema="INFORMATION_SCHEMA")
+
+tables = Table("TABLES", ischema,
+    Column("TABLE_CATALOG", String, key="table_catalog"),
+    Column("TABLE_SCHEMA", String, key="table_schema"),
+    Column("TABLE_NAME", String, key="table_name"),
+    Column("TABLE_TYPE", String, key="table_type"),
+    schema="INFORMATION_SCHEMA")
+
+columns = Table("COLUMNS", ischema,
+    Column("TABLE_SCHEMA", String, key="table_schema"),
+    Column("TABLE_NAME", String, key="table_name"),
+    Column("COLUMN_NAME", String, key="column_name"),
+    Column("IS_NULLABLE", Integer, key="is_nullable"),
+    Column("DATA_TYPE", String, key="data_type"),
+    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
+    Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"),
+    Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
+    Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
+    Column("COLUMN_DEFAULT", Integer, key="column_default"),
+    Column("COLLATION_NAME", String, key="collation_name"),
+    schema="INFORMATION_SCHEMA")
+
+constraints = Table("TABLE_CONSTRAINTS", ischema,
+    Column("TABLE_SCHEMA", String, key="table_schema"),
+    Column("TABLE_NAME", String, key="table_name"),
+    Column("CONSTRAINT_NAME", String, key="constraint_name"),
+    Column("CONSTRAINT_TYPE", String, key="constraint_type"),
+    schema="INFORMATION_SCHEMA")
+
+column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
+    Column("TABLE_SCHEMA", String, key="table_schema"),
+    Column("TABLE_NAME", String, key="table_name"),
+    Column("COLUMN_NAME", String, key="column_name"),
+    Column("CONSTRAINT_NAME", String, key="constraint_name"),
+    schema="INFORMATION_SCHEMA")
+
+key_constraints = Table("KEY_COLUMN_USAGE", ischema,
+    Column("TABLE_SCHEMA", String, key="table_schema"),
+    Column("TABLE_NAME", String, key="table_name"),
+    Column("COLUMN_NAME", String, key="column_name"),
+    Column("CONSTRAINT_NAME", String, key="constraint_name"),
+    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
+    schema="INFORMATION_SCHEMA")
+
+ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
+    Column("CONSTRAINT_CATALOG", String, key="constraint_catalog"),
+    Column("CONSTRAINT_SCHEMA", String, key="constraint_schema"),
+    Column("CONSTRAINT_NAME", String, key="constraint_name"),
+    Column("UNIQUE_CONSTRAINT_CATLOG", String, key="unique_constraint_catalog"),
+    Column("UNIQUE_CONSTRAINT_SCHEMA", String, key="unique_constraint_schema"),
+    Column("UNIQUE_CONSTRAINT_NAME", String, key="unique_constraint_name"),
+    Column("MATCH_OPTION", String, key="match_option"),
+    Column("UPDATE_RULE", String, key="update_rule"),
+    Column("DELETE_RULE", String, key="delete_rule"),
+    schema="INFORMATION_SCHEMA")
+
index d0e0336f7daebaa7c6307892328e0991299933e1..5b40dcaa4dc30fb93dd04e1fa0d2d3a8671e57ab 100644 (file)
@@ -282,7 +282,7 @@ class SQLiteDialect(default.DefaultDialect):
         else:
             pragma = "PRAGMA "
         qtable = quote(table_name)
-        cursor = connection.execute("%stable_info(%s)" % (pragma, qtable))
+        cursor = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
         row = cursor.fetchone()
 
         # consume remaining rows, to work around
@@ -354,7 +354,7 @@ class SQLiteDialect(default.DefaultDialect):
         else:
             pragma = "PRAGMA "
         qtable = quote(table_name)
-        c = connection.execute("%stable_info(%s)" % (pragma, qtable))
+        c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
         found_table = False
         columns = []
         while True:
@@ -409,7 +409,7 @@ class SQLiteDialect(default.DefaultDialect):
         else:
             pragma = "PRAGMA "
         qtable = quote(table_name)
-        c = connection.execute("%sforeign_key_list(%s)" % (pragma, qtable))
+        c = _pragma_cursor(connection.execute("%sforeign_key_list(%s)" % (pragma, qtable)))
         fkeys = []
         fks = {}
         while True:
@@ -449,7 +449,7 @@ class SQLiteDialect(default.DefaultDialect):
         else:
             pragma = "PRAGMA "
         qtable = quote(table_name)
-        c = connection.execute("%sindex_list(%s)" % (pragma, qtable))
+        c = _pragma_cursor(connection.execute("%sindex_list(%s)" % (pragma, qtable)))
         indexes = []
         while True:
             row = c.fetchone()
@@ -474,7 +474,7 @@ class SQLiteDialect(default.DefaultDialect):
         else:
             pragma = "PRAGMA "
         qtable = quote(table_name)
-        c = connection.execute("%sindex_list(%s)" % (pragma, qtable))
+        c = _pragma_cursor(connection.execute("%sindex_list(%s)" % (pragma, qtable)))
         unique_indexes = []
         while True:
             row = c.fetchone()
@@ -484,7 +484,7 @@ class SQLiteDialect(default.DefaultDialect):
                 unique_indexes.append(row[1])
         # loop thru unique indexes for one that includes the primary key
         for idx in unique_indexes:
-            c = connection.execute("%sindex_info(%s)" % (pragma, idx))
+            c = _pragma_cursor(connection.execute("%sindex_info(%s)" % (pragma, idx)))
             cols = []
             while True:
                 row = c.fetchone()
@@ -532,3 +532,11 @@ class SQLiteDialect(default.DefaultDialect):
         # this doesn't do anything ???
         unique_indexes = self.get_unique_indexes(connection, table_name, 
                                     schema, info_cache=info_cache)
+
+
+def _pragma_cursor(cursor):
+    """work around SQLite issue whereby cursor.description is blank when PRAGMA returns no rows."""
+    
+    if cursor.closed:
+        cursor._fetchone_impl = lambda: None
+    return cursor
index 0de5b98ff59f634f414785a715cbb0adc9631120..4bc3f58c2e63efe17cb72b233272b2351771e884 100644 (file)
@@ -19,7 +19,7 @@ from sqlalchemy.orm import (
     )
 from sqlalchemy.orm.query import Query
 from sqlalchemy.orm.util import _state_has_identity, has_identity
-
+from sqlalchemy.orm import attributes, collections
 
 class DynaLoader(strategies.AbstractRelationLoader):
     def init_class_attribute(self, mapper):
@@ -70,11 +70,12 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
         collection_history = self._modified_event(state)
         collection_history.added_items.append(value)
 
-        if self.trackparent and value is not None:
-            self.sethasparent(attributes.instance_state(value), True)
         for ext in self.extensions:
             ext.append(state, value, initiator or self)
 
+        if self.trackparent and value is not None:
+            self.sethasparent(attributes.instance_state(value), True)
+
     def fire_remove_event(self, state, value, initiator):
         collection_history = self._modified_event(state)
         collection_history.deleted_items.append(value)
@@ -86,10 +87,12 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
             ext.remove(state, value, initiator or self)
 
     def _modified_event(self, state):
-        state.modified = True
+        
         if self.key not in state.committed_state:
             state.committed_state[self.key] = CollectionHistory(self, state)
 
+        state.modified_event(self, False, attributes.NEVER_SET, passive=attributes.PASSIVE_NO_INITIALIZE)
+
         # this is a hack to allow the _base.ComparableEntity fixture
         # to work
         state.dict[self.key] = True
@@ -99,12 +102,19 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
         if initiator is self:
             return
 
+        self._set_iterable(state, value)
+
+    def _set_iterable(self, state, iterable, adapter=None):
+
         collection_history = self._modified_event(state)
+        new_values = list(iterable)
+        
         if _state_has_identity(state):
             old_collection = list(self.get(state))
         else:
             old_collection = []
-        collection_history.replace(old_collection, value)
+
+        collections.bulk_replace(new_values, DynCollectionAdapter(self, state, old_collection), DynCollectionAdapter(self, state, new_values))
 
     def delete(self, *args, **kwargs):
         raise NotImplementedError()
@@ -132,6 +142,28 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
         if initiator is not self:
             self.fire_remove_event(state, value, initiator)
 
+class DynCollectionAdapter(object):
+    """the dynamic analogue to orm.collections.CollectionAdapter"""
+    
+    def __init__(self, attr, owner_state, data):
+        self.attr = attr
+        self.state = owner_state
+        self.data = data
+    
+    def __iter__(self):
+        return iter(self.data)
+        
+    def append_with_event(self, item, initiator=None):
+        self.attr.append(self.state, item, initiator)
+
+    def remove_with_event(self, item, initiator=None):
+        self.attr.remove(self.state, item, initiator)
+
+    def append_without_event(self, item):
+        pass
+    
+    def remove_without_event(self, item):
+        pass
         
 class AppenderMixin(object):
     query_class = None
@@ -236,8 +268,4 @@ class CollectionHistory(object):
             self.deleted_items = []
             self.added_items = []
             self.unchanged_items = []
-            
-    def replace(self, olditems, newitems):
-        self.added_items = newitems
-        self.deleted_items = olditems
         
index 5e01443a6833ead2d03de6094cf16544b36ff75f..d8af4e74f5a548adc380f323ce12242bbb0bc7cc 100644 (file)
@@ -1357,6 +1357,7 @@ class Session(object):
             not self._deleted and not self._new):
             return
 
+        
         dirty = self._dirty_states
         if not dirty and not self._deleted and not self._new:
             self.identity_map.modified = False
index 37c88907b6f2dde2895901973180af29bae6bfbf..c858ca10265f8ae6657f9219dd1e3b795b3780ee 100644 (file)
@@ -547,7 +547,7 @@ def object_mapper(instance):
         raise exc.UnmappedInstanceError(instance)
 
 def class_mapper(class_, compile=True):
-    """Given a class (or an object), return the primary Mapper associated with the key.
+    """Given a class, return the primary Mapper associated with it.
 
     Raises UnmappedClassError if no mapping is configured.
 
@@ -597,7 +597,7 @@ def _is_mapped_class(cls):
         manager = attributes.manager_of_class(cls)
         return manager and _INSTRUMENTOR in manager.info
     return False
-    
+
 def instance_str(instance):
     """Return a string describing an instance."""
 
index 363b433d5ce6f7ef0ad5dd79a44db6acb8a5b847..a4e9aa5c4fd1a8c25d62463436ac65562d480281 100644 (file)
@@ -842,8 +842,13 @@ class ForeignKey(SchemaItem):
             return schema + "." + self.column.table.name + "." + self.column.key
         elif isinstance(self._colspec, basestring):
             return self._colspec
+        elif hasattr(self._colspec, '__clause_element__'):
+            _column = self._colspec.__clause_element__()
         else:
-            return "%s.%s" % (self._colspec.table.fullname, self._colspec.key)
+            _column = self._colspec
+            
+        return "%s.%s" % (_column.table.fullname, _column.key)
+
     target_fullname = property(_get_colspec)
 
     def references(self, table):
index 126007c3591cc4e7942ae3db6727bb1f4d47aa3b..5e565cfe90f7670cf1c455d1ddeba6e5ce437ba4 100644 (file)
@@ -1548,7 +1548,7 @@ class _CompareMixin(ColumnOperators):
             return other.__clause_element__()
         elif not isinstance(other, ClauseElement):
             return self._bind_param(other)
-        elif isinstance(other, _SelectBaseMixin):
+        elif isinstance(other, (_SelectBaseMixin, Alias)):
             return other.as_scalar()
         else:
             return other
@@ -1790,7 +1790,21 @@ class FromClause(Selectable):
         return Join(self, right, onclause, True)
 
     def alias(self, name=None):
-        """return an alias of this ``FromClause`` against another ``FromClause``."""
+        """return an alias of this ``FromClause``.
+        
+        For table objects, this has the effect of the table being rendered
+        as ``tablename AS aliasname`` in a SELECT statement.  
+        For select objects, the effect is that of creating a named
+        subquery, i.e. ``(select ...) AS aliasname``.
+        The ``alias()`` method is the general way to create
+        a "subquery" out of an existing SELECT.
+        
+        The ``name`` parameter is optional, and if left blank an 
+        "anonymous" name will be generated at compile time, guaranteed
+        to be unique against other anonymous constructs used in the
+        same statement.
+        
+        """
 
         return Alias(self, name)
 
@@ -2020,7 +2034,7 @@ class _BindParamClause(ColumnElement):
         the same type.
 
         """
-        return isinstance(other, _BindParamClause) and other.type.__class__ == self.type.__class__
+        return isinstance(other, _BindParamClause) and other.type.__class__ == self.type.__class__ and self.value == other.value
 
     def __getstate__(self):
         """execute a deferred value for serialization purposes."""
@@ -2625,6 +2639,12 @@ class Alias(FromClause):
         return self.name.encode('ascii', 'backslashreplace')
         # end Py2K
 
+    def as_scalar(self):
+        try:
+            return self.element.as_scalar()
+        except AttributeError:
+            raise AttributeError("Element %s does not support 'as_scalar()'" % self.element)
+        
     def is_derived_from(self, fromclause):
         if fromclause in self._cloned_set:
             return True
index 738f37495582a6bd1b63ea9dc8586d656cafe456..c6d4fe681471c642f0064ad074335acd2c2e13b5 100644 (file)
@@ -675,6 +675,8 @@ class DeclarativeTest(DeclarativeTestBase):
         # longer the case
         sa.orm.compile_mappers()
 
+        eq_(str(Address.user_id.property.columns[0].foreign_keys[0]), "ForeignKey('users.id')")
+        
         Base.metadata.create_all()
         u1 = User(name='u1', addresses=[
             Address(email='one'),
index e72acac9aa38878fc9606dbd220a6bde0b62e3a2..f975f762f8b0fc81a7267cc05e47d8dc4d9161e9 100644 (file)
@@ -2,8 +2,8 @@ import testenv; testenv.configure_for_tests()
 import operator
 from sqlalchemy.orm import dynamic_loader, backref
 from testlib import testing
-from testlib.sa import Table, Column, Integer, String, ForeignKey, desc
-from testlib.sa.orm import mapper, relation, create_session, Query
+from testlib.sa import Table, Column, Integer, String, ForeignKey, desc, select, func
+from testlib.sa.orm import mapper, relation, create_session, Query, attributes
 from testlib.testing import eq_
 from testlib.compat import _function_named
 from orm import _base, _fixtures
@@ -152,11 +152,84 @@ class DynamicTest(_fixtures.FixtureTest):
         assert type(q).__name__ == 'MyQuery'
 
 
-class FlushTest(_fixtures.FixtureTest):
+class SessionTest(_fixtures.FixtureTest):
     run_inserts = None
 
     @testing.resolve_artifact_names
-    def test_basic(self):
+    def test_events(self):
+        mapper(User, users, properties={
+            'addresses':dynamic_loader(mapper(Address, addresses))
+        })
+        sess = create_session()
+        u1 = User(name='jack')
+        a1 = Address(email_address='foo')
+        sess.add_all([u1, a1])
+        sess.flush()
+        
+        assert testing.db.scalar(select([func.count(1)]).where(addresses.c.user_id!=None)) == 0
+        u1 = sess.query(User).get(u1.id)
+        u1.addresses.append(a1)
+        sess.flush()
+
+        assert testing.db.execute(select([addresses]).where(addresses.c.user_id!=None)).fetchall() == [
+            (a1.id, u1.id, 'foo')
+        ]
+        
+        u1.addresses.remove(a1)
+        sess.flush()
+        assert testing.db.scalar(select([func.count(1)]).where(addresses.c.user_id!=None)) == 0
+        
+        u1.addresses.append(a1)
+        sess.flush()
+        assert testing.db.execute(select([addresses]).where(addresses.c.user_id!=None)).fetchall() == [
+            (a1.id, u1.id, 'foo')
+        ]
+
+        a2 = Address(email_address='bar')
+        u1.addresses.remove(a1)
+        u1.addresses.append(a2)
+        sess.flush()
+        assert testing.db.execute(select([addresses]).where(addresses.c.user_id!=None)).fetchall() == [
+            (a2.id, u1.id, 'bar')
+        ]
+        
+
+    @testing.resolve_artifact_names
+    def test_merge(self):
+        mapper(User, users, properties={
+            'addresses':dynamic_loader(mapper(Address, addresses), order_by=addresses.c.email_address)
+        })
+        sess = create_session()
+        u1 = User(name='jack')
+        a1 = Address(email_address='a1')
+        a2 = Address(email_address='a2')
+        a3 = Address(email_address='a3')
+        
+        u1.addresses.append(a2)
+        u1.addresses.append(a3)
+        
+        sess.add_all([u1, a1])
+        sess.flush()
+        
+        u1 = User(id=u1.id, name='jack')
+        u1.addresses.append(a1)
+        u1.addresses.append(a3)
+        u1 = sess.merge(u1)
+        assert attributes.get_history(u1, 'addresses') == (
+            [a1], 
+            [a3], 
+            [a2]
+        )
+
+        sess.flush()
+        
+        eq_(
+            list(u1.addresses),
+            [a1, a3]
+        )
+        
+    @testing.resolve_artifact_names
+    def test_flush(self):
         mapper(User, users, properties={
             'addresses':dynamic_loader(mapper(Address, addresses))
         })
@@ -192,6 +265,31 @@ class FlushTest(_fixtures.FixtureTest):
         assert 'addresses' not in u1.__dict__.keys()
         u1.addresses = [Address(email_address='test')]
         assert 'addresses' in dir(u1)
+    
+    @testing.resolve_artifact_names
+    def test_collection_set(self):
+        mapper(User, users, properties={
+            'addresses':dynamic_loader(mapper(Address, addresses), order_by=addresses.c.email_address)
+        })
+        sess = create_session(autoflush=True, autocommit=False)
+        u1 = User(name='jack')
+        a1 = Address(email_address='a1')
+        a2 = Address(email_address='a2')
+        a3 = Address(email_address='a3')
+        a4 = Address(email_address='a4')
+        
+        sess.add(u1)
+        u1.addresses = [a1, a3]
+        assert list(u1.addresses) == [a1, a3]
+        u1.addresses = [a1, a2, a4]
+        assert list(u1.addresses) == [a1, a2, a4]
+        u1.addresses = [a2, a3]
+        assert list(u1.addresses) == [a2, a3]
+        u1.addresses = []
+        assert list(u1.addresses) == []
+        
+        
+
         
     @testing.resolve_artifact_names
     def test_rollback(self):
@@ -353,7 +451,7 @@ def create_backref_test(autoflush, saveuser):
     test_backref = _function_named(
         test_backref, "test%s%s" % ((autoflush and "_autoflush" or ""),
                                     (saveuser and "_saveuser" or "_savead")))
-    setattr(FlushTest, test_backref.__name__, test_backref)
+    setattr(SessionTest, test_backref.__name__, test_backref)
 
 for autoflush in (False, True):
     for saveuser in (False, True):
index 4ac8a13efcf324473a870c4cdd5624e351380ce3..b5c3b3669eac7ac0060bc5e94fa25aa6616ed213 100644 (file)
@@ -173,6 +173,27 @@ class LazyTest(_fixtures.FixtureTest):
         l = q.filter(users.c.id == 7).all()
         assert [User(id=7, address=Address(id=1))] == l
 
+    @testing.resolve_artifact_names
+    def test_many_to_one_binds(self):
+        mapper(Address, addresses, primary_key=[addresses.c.user_id, addresses.c.email_address])
+        
+        mapper(User, users, properties = dict(
+            address = relation(Address, uselist=False,
+                primaryjoin=sa.and_(users.c.id==addresses.c.user_id, addresses.c.email_address=='ed@bettyboop.com')
+            )
+        ))
+        q = create_session().query(User)
+        eq_(
+            [
+                User(id=7, address=None),
+                User(id=8, address=Address(id=3)),
+                User(id=9, address=None),
+                User(id=10, address=None),
+            ], 
+            list(q)
+        )
+        
+
     @testing.resolve_artifact_names
     def test_double(self):
         """tests lazy loading with two relations simulatneously, from the same table, using aliases.  """
index f0eb798d11a878c65faa013e2e66c881f0bbe611..e1e18896a829f3abc544772504bac4a60be34474 100644 (file)
@@ -525,7 +525,16 @@ class ExpressionTest(QueryTest, AssertsCompiledSQL):
         l = list(session.query(User).instances(s.execute(emailad = 'jack@bean.com')))
         eq_([User(id=7)], l)
 
-
+    def test_scalar_subquery(self):
+        session = create_session()
+        
+        q = session.query(User.id).filter(User.id==7).subquery()
+        
+        q = session.query(User).filter(User.id==q)
+        
+        eq_(User(id=7), q.one())
+        
+        
     def test_in(self):
         session = create_session()
         s = session.query(User.id).join(User.addresses).group_by(User.id).having(func.count(Address.id) > 2)
@@ -1742,7 +1751,7 @@ class MixedEntitiesTest(QueryTest):
         q2 = q.group_by([User.name.like('%j%')]).order_by(desc(User.name.like('%j%'))).values(User.name.like('%j%'), func.count(User.name.like('%j%')))
         self.assertEquals(list(q2), [(True, 1), (False, 3)])
 
-    def test_scalar_subquery(self):
+    def test_correlated_subquery(self):
         """test that a subquery constructed from ORM attributes doesn't leak out 
         those entities to the outermost query.
         
index 9f35007481bee92403d20f6c361d15fda0adb888..19401098c19117f4fdbefd1c8eca04e099112f41 100644 (file)
@@ -11,6 +11,9 @@ def suite():
         'profiling.zoomark_orm',
         )
     alltests = unittest.TestSuite()
+    if testenv.testlib.config.coverage_enabled:
+        return alltests
+        
     for name in modules_to_test:
         mod = __import__(name)
         for token in name.split('.')[1:]:
index a4de6e331eab0849e65da9c3a89a1d8c02088769..d7ef36bce28ea8b04410dbb75c1b68711c30d4f0 100644 (file)
@@ -305,6 +305,12 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
         s = select([table1.c.myid]).correlate(None).as_scalar()
         self.assert_compile(select([table1, s]), "SELECT mytable.myid, mytable.name, mytable.description, (SELECT mytable.myid FROM mytable) AS anon_1 FROM mytable")
 
+        # test that aliases use as_scalar() when used in an explicitly scalar context
+        s = select([table1.c.myid]).alias()
+        self.assert_compile(select([table1.c.myid]).where(table1.c.myid==s), "SELECT mytable.myid FROM mytable WHERE mytable.myid = (SELECT mytable.myid FROM mytable)")
+        self.assert_compile(select([table1.c.myid]).where(s > table1.c.myid), "SELECT mytable.myid FROM mytable WHERE mytable.myid < (SELECT mytable.myid FROM mytable)")
+
+
         s = select([table1.c.myid]).as_scalar()
         self.assert_compile(select([table2, s]), "SELECT myothertable.otherid, myothertable.othername, (SELECT mytable.myid FROM mytable) AS anon_1 FROM myothertable")
 
@@ -396,6 +402,7 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
             "myothertable.othername = :othername_2 OR myothertable.otherid = :otherid_1) AND sysdate() = today()",
             checkparams = {'othername_1': 'asdf', 'othername_2':'foo', 'otherid_1': 9, 'myid_1': 12}
         )
+        
 
     def test_distinct(self):
         self.assert_compile(
index ac9f397177b22aaad52291f43e75c204f37fcfeb..cef4c6e1dcf641808034fe455f8aaca1584d2a82 100644 (file)
@@ -9,6 +9,7 @@ db_label, db_url, db_opts = None, None, {}
 
 options = None
 file_config = None
+coverage_enabled = False
 
 base_config = """
 [db]
@@ -77,24 +78,50 @@ def _log(option, opt_str, value, parser):
     elif opt_str.endswith('-debug'):
         logging.getLogger(value).setLevel(logging.DEBUG)
 
-def _start_coverage(option, opt_str, value, parser):
+def _start_cumulative_coverage(option, opt_str, value, parser):
+    _start_coverage(option, opt_str, value, parser, erase=False)
+
+def _start_coverage(option, opt_str, value, parser, erase=True):
     import sys, atexit, coverage
     true_out = sys.stdout
-
-    def _iter_covered_files():
-        import sqlalchemy
-        for rec in os.walk(os.path.dirname(sqlalchemy.__file__)):
+    
+    global coverage_enabled
+    coverage_enabled = True
+    
+    def _iter_covered_files(mod, recursive=True):
+        
+        if recursive:
+            ff = os.walk
+        else:
+            ff = os.listdir
+            
+        for rec in ff(os.path.dirname(mod.__file__)):
             for x in rec[2]:
                 if x.endswith('.py'):
                     yield os.path.join(rec[0], x)
+            
     def _stop():
         coverage.stop()
         true_out.write("\nPreparing coverage report...\n")
-        coverage.report(list(_iter_covered_files()),
-                        show_missing=False, ignore_errors=False,
-                        file=true_out)
+
+        from sqlalchemy import sql, orm, engine, \
+                            ext, databases, log
+                        
+        import sqlalchemy
+        
+        for modset in [
+            _iter_covered_files(sqlalchemy, recursive=False),
+            _iter_covered_files(databases),
+            _iter_covered_files(engine),
+            _iter_covered_files(ext),
+            _iter_covered_files(orm),
+        ]:
+            coverage.report(list(modset),
+                            show_missing=False, ignore_errors=False,
+                            file=true_out)
     atexit.register(_stop)
-    coverage.erase()
+    if erase:
+        coverage.erase()
     coverage.start()
 
 def _list_dbs(*args):
@@ -151,6 +178,8 @@ opt("--table-option", action="append", dest="tableopts", default=[],
     help="Add a dialect-specific table option, key=value")
 opt("--coverage", action="callback", callback=_start_coverage,
     help="Dump a full coverage report after running tests")
+opt("--cumulative-coverage", action="callback", callback=_start_cumulative_coverage,
+    help="Like --coverage, but accumulate coverage into the current DB")
 opt("--profile", action="append", dest="profile_targets", default=[],
     help="Enable a named profile target (multiple OK.)")
 opt("--profile-sort", action="store", dest="profile_sort", default=None,