--- /dev/null
+.. change::
+ :tags: change, engine
+
+ Removed the concept of a bound engine from the :class:`.Compiler` object,
+ and removed the ``.execute()`` and ``.scalar()`` methods from :class:`.Compiler`.
+ These were essentially forgotten methods from over a decade ago with no
+ practical use, and it's not appropriate for the :class:`.Compiler` object
+ itself to maintain a reference to an :class:`.Engine`.
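An illustrative sketch (not part of the diff itself) of the pattern that remains after this removal: the compiled form of a statement is still obtained from the statement itself, while execution always goes through a Connection rather than the Compiler::

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")
    stmt = text("SELECT 1")

    # the Compiled object can still be produced and inspected; it simply
    # no longer holds an Engine or exposes execute() / scalar()
    compiled = stmt.compile(dialect=engine.dialect)
    print(str(compiled))

    # execution goes through a Connection (or Session), never the Compiler
    with engine.connect() as conn:
        print(conn.execute(stmt).scalar())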
fullname VARCHAR,
PRIMARY KEY (id)
)
- ()
+ [...] ()
COMMIT
CREATE TABLE addresses (
id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(user_id) REFERENCES users (id)
)
- ()
+ [...] ()
COMMIT
.. note::
>>> result = conn.execute(ins)
{opensql}INSERT INTO users (name, fullname) VALUES (?, ?)
- ('jack', 'Jack Jones')
+ [...] ('jack', 'Jack Jones')
COMMIT
So the INSERT statement was now issued to the database. Although we got
>>> ins = users.insert()
>>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
{opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?)
- (2, 'wendy', 'Wendy Williams')
+ [...] (2, 'wendy', 'Wendy Williams')
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
... {'user_id': 2, 'email_address' : 'wendy@aol.com'},
... ])
{opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?)
- ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com'))
+ [...] ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com'))
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
>>> result = conn.execute(s)
{opensql}SELECT users.id, users.name, users.fullname
FROM users
- ()
+ [...] ()
Above, we issued a basic :func:`_expression.select` call, placing the ``users`` table
within the COLUMNS clause of the select, and then executing. SQLAlchemy
{sql}>>> result = conn.execute(s)
SELECT users.id, users.name, users.fullname
FROM users
- ()
+ [...] ()
{stop}>>> for id, name, fullname in result:
... print("name:", name, "; fullname: ", fullname)
{sql}>>> result = conn.execute(s)
SELECT users.id, users.name, users.fullname
FROM users
- ()
+ [...] ()
{stop}>>> for row in result:
... print("name:", row.name, "; fullname: ", row.fullname)
{sql}>>> result = conn.execute(s)
SELECT users.id, users.name, users.fullname
FROM users
- ()
+ [...] ()
{stop}>>> row = result.fetchone()
>>> print("name:", row._mapping['name'], "; fullname:", row._mapping['fullname'])
... print("name:", row._mapping[users.c.name], "; fullname:", row._mapping[users.c.fullname])
SELECT users.id, users.name, users.fullname
FROM users
- ()
+ [...] ()
{stop}name: jack ; fullname: Jack Jones
name: wendy ; fullname: Wendy Williams
{sql}>>> result = conn.execute(s)
SELECT users.name, users.fullname
FROM users
- ()
+ [...] ()
{stop}>>> for row in result:
... print(row)
(u'jack', u'Jack Jones')
... print(row)
SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address
FROM users, addresses
- ()
+ [...] ()
{stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com')
(1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')
(1, u'jack', u'Jack Jones', 3, 2, u'www@www.org')
addresses.user_id, addresses.email_address
FROM users, addresses
WHERE users.id = addresses.user_id
- ()
+ [...] ()
{stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com')
(1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')
(2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org')
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
(addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
- (', ', 'm', 'z', '%@aol.com', '%@msn.com')
+ [...] (', ', 'm', 'z', '%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
Once again, SQLAlchemy figured out the FROM clause for our statement. In fact
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
(addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
- (', ', 'm', 'z', '%@aol.com', '%@msn.com')
+ [...] (', ', 'm', 'z', '%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
The way that we can build up a :func:`_expression.select` construct through successive
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
(addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
- ('m', 'z', '%@aol.com', '%@msn.com')
+ [...] ('m', 'z', '%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
Above, we can see that bound parameters are specified in
{opensql}SELECT users.id, addresses.id, users.id, users.name,
addresses.email_address AS email
FROM users JOIN addresses ON users.id=addresses.user_id WHERE users.id = 1
- ()
+ [...] ()
{stop}
Above, there are three columns in the result that are named "id", but since
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z'
AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
- ('%@aol.com', '%@msn.com')
+ [...] ('%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
.. versionchanged:: 1.0.0
WHERE users.id = addresses.user_id
AND users.name BETWEEN 'm' AND 'z'
AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
- (', ', '%@aol.com', '%@msn.com')
+ [...] (', ', '%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
Ordering or Grouping by a Label
{sql}>>> conn.execute(stmt).fetchall()
SELECT addresses.user_id, count(addresses.id) AS num_addresses
FROM addresses GROUP BY addresses.user_id ORDER BY addresses.user_id, num_addresses
- ()
+ [...] ()
{stop}[(1, 2), (2, 2)]
We can use modifiers like :func:`.asc` or :func:`.desc` by passing the string
{sql}>>> conn.execute(stmt).fetchall()
SELECT addresses.user_id, count(addresses.id) AS num_addresses
FROM addresses GROUP BY addresses.user_id ORDER BY addresses.user_id, num_addresses DESC
- ()
+ [...] ()
{stop}[(1, 2), (2, 2)]
Note that the string feature here is very much tailored to when we have
users_2.name, users_2.fullname
FROM users AS users_1, users AS users_2
WHERE users_1.name > users_2.name ORDER BY users_1.name
- ()
+ [...] ()
{stop}[(2, u'wendy', u'Wendy Williams', 1, u'jack', u'Jack Jones')]
AND users.id = addresses_2.user_id
AND addresses_1.email_address = ?
AND addresses_2.email_address = ?
- ('jack@msn.com', 'jack@yahoo.com')
+ [...] ('jack@msn.com', 'jack@yahoo.com')
{stop}[(1, u'jack', u'Jack Jones')]
Note that the :class:`_expression.Alias` construct generated the names ``addresses_1`` and
AND addresses_1.email_address = ?
AND addresses_2.email_address = ?) AS anon_1
WHERE users.id = anon_1.id
- ('jack@msn.com', 'jack@yahoo.com')
+ [...] ('jack@msn.com', 'jack@yahoo.com')
{stop}[(u'jack',)]
.. versionchanged:: 1.4 Added the :class:`.Subquery` object and created more of a
{sql}>>> conn.execute(s).fetchall()
SELECT users.fullname
FROM users JOIN addresses ON addresses.email_address LIKE users.name || ?
- ('%',)
+ [...] ('%',)
{stop}[(u'Jack Jones',), (u'Jack Jones',), (u'Wendy Williams',)]
The :meth:`_expression.FromClause.outerjoin` method creates ``LEFT OUTER JOIN`` constructs,
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses, anon_1
WHERE addresses.user_id = anon_1.id ORDER BY addresses.id
- ('wendy',)
+ [...] ('wendy',)
{stop}[(3, 2, 'www@www.org'), (4, 2, 'wendy@aol.com')]
The CTE construct is a great way to provide a source of rows that is
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses, anon_1
WHERE addresses.user_id = anon_1.id ORDER BY addresses.id
- ()
+ [...] ()
{stop}[(1, 1, 'jack@yahoo.com'), (2, 1, 'jack@msn.com'), (3, 2, 'www@www.org'), (4, 2, 'wendy@aol.com')]
SELECT users.id, users.name, users.fullname
FROM users
WHERE users.name = ?
- ('wendy',)
+ [...] ('wendy',)
{stop}[(2, u'wendy', u'Wendy Williams')]
Another important aspect of :func:`.bindparam` is that it may be assigned a
SELECT users.id, users.name, users.fullname
FROM users
WHERE users.name LIKE ? || '%'
- ('wendy',)
+ [...] ('wendy',)
{stop}[(2, u'wendy', u'Wendy Williams')]
FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id
WHERE users.name LIKE ? || '%' OR addresses.email_address LIKE ? || '@%'
ORDER BY addresses.id
- ('jack', 'jack')
+ [...] ('jack', 'jack')
{stop}[(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')]
.. seealso::
... ).scalar()
{opensql}SELECT max(addresses.email_address) AS maxemail
FROM addresses
- ()
+ [...] ()
{stop}u'www@www.org'
Databases such as PostgreSQL and Oracle which support functions that return
>>> conn.execute(s).fetchall()
{opensql}SELECT CAST(users.id AS VARCHAR) AS id
FROM users
- ()
+ [...] ()
{stop}[('1',), ('2',)]
The :func:`.cast` function is used not just when converting between datatypes,
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ? ORDER BY email_address
- ('foo@bar.com', '%@yahoo.com')
+ [...] ('foo@bar.com', '%@yahoo.com')
{stop}[(1, 1, u'jack@yahoo.com')]
Also available, though not supported on all databases, are
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ?
- ('%@%.com', '%@msn.com')
+ [...] ('%@%.com', '%@msn.com')
{stop}[(1, 1, u'jack@yahoo.com'), (4, 2, u'wendy@aol.com')]
A common issue with so-called "compound" selectables arises due to the fact
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ?
- ('%@yahoo.com', '%@msn.com', '%@msn.com')
+ [...] ('%@yahoo.com', '%@msn.com', '%@msn.com')
{stop}[(1, 1, u'jack@yahoo.com')]
.. seealso::
FROM addresses
WHERE users.id = addresses.user_id) AS anon_1
FROM users
- ()
+ [...] ()
{stop}[(u'jack', 2), (u'wendy', 2)]
To apply a non-anonymous column name to our scalar select, we create
FROM addresses
WHERE users.id = addresses.user_id) AS address_count
FROM users
- ()
+ [...] ()
{stop}[(u'jack', 2), (u'wendy', 2)]
.. seealso::
FROM addresses
WHERE addresses.user_id = users.id
AND addresses.email_address = ?)
- ('jack@yahoo.com',)
+ [...] ('jack@yahoo.com',)
{stop}[(u'jack',)]
Auto-correlation will usually do what's expected, however it can also be controlled.
WHERE users.id = (SELECT users.id
FROM users
WHERE users.id = addresses.user_id AND users.name = ?)
- ('jack',)
+ [...] ('jack',)
{stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')]
To entirely disable a statement from correlating, we can pass ``None``
WHERE users.id = (SELECT users.id
FROM users
WHERE users.name = ?)
- ('wendy',)
+ [...] ('wendy',)
{stop}[(u'wendy',)]
We can also control correlation via exclusion, using the :meth:`_expression.Select.correlate_except`
WHERE users.id = (SELECT users.id
FROM users
WHERE users.id = addresses.user_id AND users.name = ?)
- ('jack',)
+ [...] ('jack',)
{stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')]
.. _lateral_selects:
>>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name
FROM users ORDER BY users.name
- ()
+ [...] ()
{stop}[(u'jack',), (u'wendy',)]
Ascending or descending can be controlled using the :meth:`_expression.ColumnElement.asc`
>>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name
FROM users ORDER BY users.name DESC
- ()
+ [...] ()
{stop}[(u'wendy',), (u'jack',)]
Grouping refers to the GROUP BY clause, and is usually used in conjunction
FROM users JOIN addresses
ON users.id = addresses.user_id
GROUP BY users.name
- ()
+ [...] ()
{stop}[(u'jack', 2), (u'wendy', 2)]
HAVING can be used to filter results on an aggregate value, after GROUP BY has
ON users.id = addresses.user_id
GROUP BY users.name
HAVING length(users.name) > ?
- (4,)
+ [...] (4,)
{stop}[(u'wendy', 2)]
A common system of dealing with duplicates in composed SELECT statements
{opensql}SELECT DISTINCT users.name
FROM users, addresses
WHERE (addresses.email_address LIKE '%' || users.name || '%')
- ()
+ [...] ()
{stop}[(u'jack',), (u'wendy',)]
Most database backends support a system of limiting how many rows
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
LIMIT ? OFFSET ?
- (1, 1)
+ [...] (1, 1)
{stop}[(u'jack', u'jack@msn.com')]
... values(fullname="Fullname: " + users.c.name)
>>> conn.execute(stmt)
{opensql}UPDATE users SET fullname=(? || users.name)
- ('Fullname: ',)
+ [...] ('Fullname: ',)
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
... {'id':6, '_name':'name3'},
... ])
{opensql}INSERT INTO users (id, name) VALUES (?, (? || ?))
- ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name'))
+ [...] ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name'))
COMMIT
<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
>>> conn.execute(stmt)
{opensql}UPDATE users SET name=? WHERE users.name = ?
- ('ed', 'jack')
+ [...] ('ed', 'jack')
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
... {'oldname':'jim', 'newname':'jake'},
... ])
{opensql}UPDATE users SET name=? WHERE users.name = ?
- (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim'))
+ [...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim'))
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
FROM addresses
WHERE addresses.user_id = users.id
LIMIT ? OFFSET ?)
- (1, 0)
+ [...] (1, 0)
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
>>> conn.execute(addresses.delete())
{opensql}DELETE FROM addresses
- ()
+ [...] ()
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
>>> conn.execute(users.delete().where(users.c.name > 'm'))
{opensql}DELETE FROM users WHERE users.name > ?
- ('m',)
+ [...] ('m',)
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
>>> result = conn.execute(users.delete())
{opensql}DELETE FROM users
- ()
+ [...] ()
COMMIT
{stop}>>> result.rowcount
1
>>> Base.metadata.create_all(engine)
PRAGMA main.table_info("users")
- ()
+ [...] ()
PRAGMA temp.table_info("users")
- ()
+ [...] ()
CREATE TABLE users (
id INTEGER NOT NULL, name VARCHAR,
fullname VARCHAR,
nickname VARCHAR,
PRIMARY KEY (id)
)
- ()
+ [...] ()
COMMIT
.. topic:: Minimal Table Descriptions vs. Full Descriptions
{sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE
BEGIN (implicit)
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
- ('ed', 'Ed Jones', 'edsnickname')
+ [...] ('ed', 'Ed Jones', 'edsnickname')
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
FROM users
WHERE users.name = ?
LIMIT ? OFFSET ?
- ('ed', 1, 0)
+ [...] ('ed', 1, 0)
{stop}>>> our_user
<User(name='ed', fullname='Ed Jones', nickname='edsnickname')>
{sql}>>> session.commit()
UPDATE users SET nickname=? WHERE users.id = ?
- ('eddie', 1)
+ [...] ('eddie', 1)
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
- ('wendy', 'Wendy Williams', 'windy')
+ [...] ('wendy', 'Wendy Williams', 'windy')
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
- ('mary', 'Mary Contrary', 'mary')
+ [...] ('mary', 'Mary Contrary', 'mary')
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
- ('fred', 'Fred Flintstone', 'freddy')
+ [...] ('fred', 'Fred Flintstone', 'freddy')
COMMIT
:meth:`~.Session.commit` flushes the remaining changes to the
users.nickname AS users_nickname
FROM users
WHERE users.id = ?
- (1,)
+ [...] (1,)
{stop}1
After the :class:`~sqlalchemy.orm.session.Session` inserts new rows in the
{sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
UPDATE users SET name=? WHERE users.id = ?
- ('Edwardo', 1)
+ [...] ('Edwardo', 1)
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
- ('fakeuser', 'Invalid', '12345')
+ [...] ('fakeuser', 'Invalid', '12345')
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
users.nickname AS users_nickname
FROM users
WHERE users.name IN (?, ?)
- ('Edwardo', 'fakeuser')
+ [...] ('Edwardo', 'fakeuser')
{stop}[<User(name='Edwardo', fullname='Ed Jones', nickname='eddie')>, <User(name='fakeuser', fullname='Invalid', nickname='12345')>]
Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and
users.nickname AS users_nickname
FROM users
WHERE users.id = ?
- (1,)
+ [...] (1,)
{stop}u'ed'
>>> fake_user in session
False
users.nickname AS users_nickname
FROM users
WHERE users.name IN (?, ?)
- ('ed', 'fakeuser')
+ [...] ('ed', 'fakeuser')
{stop}[<User(name='ed', fullname='Ed Jones', nickname='eddie')>]
.. _ormtutorial_querying:
users.fullname AS users_fullname,
users.nickname AS users_nickname
FROM users ORDER BY users.id
- ()
+ [...] ()
{stop}ed Ed Jones
wendy Wendy Williams
mary Mary Contrary
SELECT users.name AS users_name,
users.fullname AS users_fullname
FROM users
- ()
+ [...] ()
{stop}ed Ed Jones
wendy Wendy Williams
mary Mary Contrary
users.fullname AS users_fullname,
users.nickname AS users_nickname
FROM users
- ()
+ [...] ()
{stop}<User(name='ed', fullname='Ed Jones', nickname='eddie')> ed
<User(name='wendy', fullname='Wendy Williams', nickname='windy')> wendy
<User(name='mary', fullname='Mary Contrary', nickname='mary')> mary
... print(row.name_label)
SELECT users.name AS name_label
FROM users
- (){stop}
+ [...] (){stop}
ed
wendy
mary
user_alias.fullname AS user_alias_fullname,
user_alias.nickname AS user_alias_nickname
FROM users AS user_alias
- (){stop}
+ [...] (){stop}
<User(name='ed', fullname='Ed Jones', nickname='eddie')>
<User(name='wendy', fullname='Wendy Williams', nickname='windy')>
<User(name='mary', fullname='Mary Contrary', nickname='mary')>
users.nickname AS users_nickname
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
- (2, 1){stop}
+ [...] (2, 1){stop}
<User(name='wendy', fullname='Wendy Williams', nickname='windy')>
<User(name='mary', fullname='Mary Contrary', nickname='mary')>
... print(name)
SELECT users.name AS users_name FROM users
WHERE users.fullname = ?
- ('Ed Jones',)
+ [...] ('Ed Jones',)
{stop}ed
...or :func:`~sqlalchemy.orm.query.Query.filter`, which uses more flexible SQL
... print(name)
SELECT users.name AS users_name FROM users
WHERE users.fullname = ?
- ('Ed Jones',)
+ [...] ('Ed Jones',)
{stop}ed
The :class:`~sqlalchemy.orm.query.Query` object is fully **generative**, meaning
users.nickname AS users_nickname
FROM users
WHERE users.name = ? AND users.fullname = ?
- ('ed', 'Ed Jones')
+ [...] ('ed', 'Ed Jones')
{stop}<User(name='ed', fullname='Ed Jones', nickname='eddie')>
Common Filter Operators
users.nickname AS users_nickname
FROM users
WHERE users.name LIKE ? ORDER BY users.id
- ('%ed',)
+ [...] ('%ed',)
{stop}[<User(name='ed', fullname='Ed Jones', nickname='eddie')>,
<User(name='fred', fullname='Fred Flintstone', nickname='freddy')>]
FROM users
WHERE users.name LIKE ? ORDER BY users.id
LIMIT ? OFFSET ?
- ('%ed', 1, 0)
+ [...] ('%ed', 1, 0)
{stop}<User(name='ed', fullname='Ed Jones', nickname='eddie')>
* :meth:`_query.Query.one()` fully fetches all rows, and if not
SELECT users.id AS users_id
FROM users
WHERE users.name = ? ORDER BY users.id
- ('ed',)
+ [...] ('ed',)
{stop}1
.. _orm_tutorial_literal_sql:
users.nickname AS users_nickname
FROM users
WHERE id<224 ORDER BY id
- ()
+ [...] ()
{stop}ed
wendy
mary
users.nickname AS users_nickname
FROM users
WHERE id<? and name=? ORDER BY users.id
- (224, 'fred')
+ [...] (224, 'fred')
{stop}<User(name='fred', fullname='Fred Flintstone', nickname='freddy')>
To use an entirely string-based statement, a :func:`_expression.text` construct
{sql}>>> session.query(User).from_statement(
... text("SELECT * FROM users where name=:name")).params(name='ed').all()
SELECT * FROM users where name=?
- ('ed',)
+ [...] ('ed',)
{stop}[<User(name='ed', fullname='Ed Jones', nickname='eddie')>]
For better targeting of mapped columns to a textual SELECT, as well as to
>>> stmt = stmt.columns(User.name, User.id, User.fullname, User.nickname)
{sql}>>> session.query(User).from_statement(stmt).params(name='ed').all()
SELECT name, id, fullname, nickname FROM users where name=?
- ('ed',)
+ [...] ('ed',)
{stop}[<User(name='ed', fullname='Ed Jones', nickname='eddie')>]
When selecting from a :func:`_expression.text` construct, the :class:`_query.Query`
{sql}>>> session.query(User.id, User.name).\
... from_statement(stmt).params(name='ed').all()
SELECT name, id FROM users where name=?
- ('ed',)
+ [...] ('ed',)
{stop}[(1, u'ed')]
.. seealso::
users.nickname AS users_nickname
FROM users
WHERE users.name LIKE ?) AS anon_1
- ('%ed',)
+ [...] ('%ed',)
{stop}2
.. sidebar:: Counting on ``count()``
{sql}>>> session.query(func.count(User.name), User.name).group_by(User.name).all()
SELECT count(users.name) AS count_1, users.name AS users_name
FROM users GROUP BY users.name
- ()
+ [...] ()
{stop}[(1, u'ed'), (1, u'fred'), (1, u'mary'), (1, u'wendy')]
To achieve our simple ``SELECT count(*) FROM table``, we can apply it as:
{sql}>>> session.query(func.count('*')).select_from(User).scalar()
SELECT count(?) AS count_1
FROM users
- ('*',)
+ [...] ('*',)
{stop}4
The usage of :meth:`_query.Query.select_from` can be removed if we express the count in terms
{sql}>>> session.query(func.count(User.id)).scalar()
SELECT count(users.id) AS count_1
FROM users
- ()
+ [...] ()
{stop}4
.. _orm_tutorial_relationship:
PRIMARY KEY (id),
FOREIGN KEY(user_id) REFERENCES users (id)
)
- ()
+ [...] ()
COMMIT
Working with Related Objects
>>> session.add(jack)
{sql}>>> session.commit()
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
- ('jack', 'Jack Bean', 'gjffdd')
+ [...] ('jack', 'Jack Bean', 'gjffdd')
INSERT INTO addresses (email_address, user_id) VALUES (?, ?)
- ('jack@google.com', 5)
+ [...] ('jack@google.com', 5)
INSERT INTO addresses (email_address, user_id) VALUES (?, ?)
- ('j25@yahoo.com', 5)
+ [...] ('j25@yahoo.com', 5)
COMMIT
Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addresses:
users.nickname AS users_nickname
FROM users
WHERE users.name = ?
- ('jack',)
+ [...] ('jack',)
{stop}>>> jack
<User(name='jack', fullname='Jack Bean', nickname='gjffdd')>
addresses.user_id AS addresses_user_id
FROM addresses
WHERE ? = addresses.user_id ORDER BY addresses.id
- (5,)
+ [...] (5,)
{stop}[<Address(email_address='jack@google.com')>, <Address(email_address='j25@yahoo.com')>]
When we accessed the ``addresses`` collection, SQL was suddenly issued. This
FROM users, addresses
WHERE users.id = addresses.user_id
AND addresses.email_address = ?
- ('jack@google.com',)
+ [...] ('jack@google.com',)
{stop}<User(name='jack', fullname='Jack Bean', nickname='gjffdd')>
<Address(email_address='jack@google.com')>
users.nickname AS users_nickname
FROM users JOIN addresses ON users.id = addresses.user_id
WHERE addresses.email_address = ?
- ('jack@google.com',)
+ [...] ('jack@google.com',)
{stop}[<User(name='jack', fullname='Jack Bean', nickname='gjffdd')>]
:meth:`_query.Query.join` knows how to join between ``User``
ON users.id = addresses_2.user_id
WHERE addresses_1.email_address = ?
AND addresses_2.email_address = ?
- ('jack@google.com', 'j25@yahoo.com')
+ [...] ('jack@google.com', 'j25@yahoo.com')
{stop}jack jack@google.com j25@yahoo.com
In addition to using the :meth:`_orm.PropComparator.of_type` method, it is
FROM addresses GROUP BY addresses.user_id) AS anon_1
ON users.id = anon_1.user_id
ORDER BY users.id
- ('*',)
+ [...] ('*',)
{stop}<User(name='ed', fullname='Ed Jones', nickname='eddie')> None
<User(name='wendy', fullname='Wendy Williams', nickname='windy')> None
<User(name='mary', fullname='Mary Contrary', nickname='mary')> None
FROM addresses
WHERE addresses.email_address != ?) AS anon_1
ON users.id = anon_1.user_id
- ('j25@yahoo.com',)
+ [...] ('j25@yahoo.com',)
{stop}<User(name='jack', fullname='Jack Bean', nickname='gjffdd')>
<Address(email_address='jack@google.com')>
WHERE EXISTS (SELECT *
FROM addresses
WHERE addresses.user_id = users.id)
- ()
+ [...] ()
{stop}jack
The :class:`~sqlalchemy.orm.query.Query` features several operators which make
WHERE EXISTS (SELECT 1
FROM addresses
WHERE users.id = addresses.user_id)
- ()
+ [...] ()
{stop}jack
:meth:`~.RelationshipProperty.Comparator.any` takes criterion as well, to limit the rows matched:
WHERE EXISTS (SELECT 1
FROM addresses
WHERE users.id = addresses.user_id AND addresses.email_address LIKE ?)
- ('%google%',)
+ [...] ('%google%',)
{stop}jack
:meth:`~.RelationshipProperty.Comparator.has` is the same operator as
WHERE NOT (EXISTS (SELECT 1
FROM users
WHERE users.id = addresses.user_id AND users.name = ?))
- ('jack',)
+ [...] ('jack',)
{stop}[]
Common Relationship Operators
users.nickname AS users_nickname
FROM users
WHERE users.name = ?
- ('jack',)
+ [...] ('jack',)
SELECT addresses.user_id AS addresses_user_id,
addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address
FROM addresses
WHERE addresses.user_id IN (?)
ORDER BY addresses.id
- (5,)
+ [...] (5,)
{stop}>>> jack
<User(name='jack', fullname='Jack Bean', nickname='gjffdd')>
FROM users
LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
WHERE users.name = ? ORDER BY addresses_1.id
- ('jack',)
+ [...] ('jack',)
{stop}>>> jack
<User(name='jack', fullname='Jack Bean', nickname='gjffdd')>
addresses.user_id AS addresses_user_id
FROM addresses JOIN users ON users.id = addresses.user_id
WHERE users.name = ?
- ('jack',)
+ [...] ('jack',)
{stop}>>> jacks_addresses
[<Address(email_address='jack@google.com')>, <Address(email_address='j25@yahoo.com')>]
>>> session.delete(jack)
{sql}>>> session.query(User).filter_by(name='jack').count()
UPDATE addresses SET user_id=? WHERE addresses.id = ?
- ((None, 1), (None, 2))
+ [...] ((None, 1), (None, 2))
DELETE FROM users WHERE users.id = ?
- (5,)
+ [...] (5,)
SELECT count(*) AS count_1
FROM (SELECT users.id AS users_id,
users.name AS users_name,
users.nickname AS users_nickname
FROM users
WHERE users.name = ?) AS anon_1
- ('jack',)
+ [...] ('jack',)
{stop}0
So far, so good. How about Jack's ``Address`` objects?
addresses.user_id AS addresses_user_id
FROM addresses
WHERE addresses.email_address IN (?, ?)) AS anon_1
- ('jack@google.com', 'j25@yahoo.com')
+ [...] ('jack@google.com', 'j25@yahoo.com')
{stop}2
Uh oh, they're still there! Analyzing the flush SQL, we can see that the
users.nickname AS users_nickname
FROM users
WHERE users.id = ?
- (5,)
+ [...] (5,)
{stop}
# remove one Address (lazy load fires off)
addresses.user_id AS addresses_user_id
FROM addresses
WHERE ? = addresses.user_id
- (5,)
+ [...] (5,)
{stop}
# only one address remains
... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
... ).count()
DELETE FROM addresses WHERE addresses.id = ?
- (2,)
+ [...] (2,)
SELECT count(*) AS count_1
FROM (SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
addresses.user_id AS addresses_user_id
FROM addresses
WHERE addresses.email_address IN (?, ?)) AS anon_1
- ('jack@google.com', 'j25@yahoo.com')
+ [...] ('jack@google.com', 'j25@yahoo.com')
{stop}1
Deleting Jack will delete both Jack and the remaining ``Address`` associated
{sql}>>> session.query(User).filter_by(name='jack').count()
DELETE FROM addresses WHERE addresses.id = ?
- (1,)
+ [...] (1,)
DELETE FROM users WHERE users.id = ?
- (5,)
+ [...] (5,)
SELECT count(*) AS count_1
FROM (SELECT users.id AS users_id,
users.name AS users_name,
users.nickname AS users_nickname
FROM users
WHERE users.name = ?) AS anon_1
- ('jack',)
+ [...] ('jack',)
{stop}0
{sql}>>> session.query(Address).filter(
addresses.user_id AS addresses_user_id
FROM addresses
WHERE addresses.email_address IN (?, ?)) AS anon_1
- ('jack@google.com', 'j25@yahoo.com')
+ [...] ('jack@google.com', 'j25@yahoo.com')
{stop}0
.. topic:: More on Cascades
PRIMARY KEY (id),
UNIQUE (keyword)
)
- ()
+ [...] ()
COMMIT
CREATE TABLE posts (
id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(user_id) REFERENCES users (id)
)
- ()
+ [...] ()
COMMIT
CREATE TABLE post_keywords (
post_id INTEGER NOT NULL,
FOREIGN KEY(post_id) REFERENCES posts (id),
FOREIGN KEY(keyword_id) REFERENCES keywords (id)
)
- ()
+ [...] ()
COMMIT
Usage is not too different from what we've been doing. Let's give Wendy some blog posts:
users.nickname AS users_nickname
FROM users
WHERE users.name = ?
- ('wendy',)
+ [...] ('wendy',)
{stop}
>>> post = BlogPost("Wendy's Blog Post", "This is a test", wendy)
>>> session.add(post)
... filter(BlogPost.keywords.any(keyword='firstpost')).\
... all()
INSERT INTO keywords (keyword) VALUES (?)
- ('wendy',)
+ [...] ('wendy',)
INSERT INTO keywords (keyword) VALUES (?)
- ('firstpost',)
+ [...] ('firstpost',)
INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?)
- (2, "Wendy's Blog Post", 'This is a test')
+ [...] (2, "Wendy's Blog Post", 'This is a test')
INSERT INTO post_keywords (post_id, keyword_id) VALUES (?, ?)
- (...)
+ [...] (...)
SELECT posts.id AS posts_id,
posts.user_id AS posts_user_id,
posts.headline AS posts_headline,
WHERE posts.id = post_keywords.post_id
AND keywords.id = post_keywords.keyword_id
AND keywords.keyword = ?)
- ('firstpost',)
+ [...] ('firstpost',)
{stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User(name='wendy', fullname='Wendy Williams', nickname='windy')>)]
If we want to look up posts owned by the user ``wendy``, we can tell
WHERE posts.id = post_keywords.post_id
AND keywords.id = post_keywords.keyword_id
AND keywords.keyword = ?))
- (2, 'firstpost')
+ [...] (2, 'firstpost')
{stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User(name='wendy', fullname='Wendy Williams', nickname='windy')>)]
Or we can use Wendy's own ``posts`` relationship, which is a "dynamic"
WHERE posts.id = post_keywords.post_id
AND keywords.id = post_keywords.keyword_id
AND keywords.keyword = ?))
- (2, 'firstpost')
+ [...] (2, 'firstpost')
{stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User(name='wendy', fullname='Wendy Williams', nickname='windy')>)]
Further Reference
WHERE t.typtype = 'd'
"""
- s = sql.text(SQL_DOMAINS).columns(attname=sqltypes.Unicode)
+ s = sql.text(SQL_DOMAINS)
c = connection.execution_options(future_result=True).execute(s)
domains = {}
)
compiled_cache = execution_options.get(
- "compiled_cache", self.dialect._compiled_cache
+ "compiled_cache", self.engine._compiled_cache
)
- if compiled_cache is not None:
- elem_cache_key = elem._generate_cache_key()
- else:
- elem_cache_key = None
-
- if elem_cache_key:
- cache_key, extracted_params = elem_cache_key
- key = (
- dialect,
- cache_key,
- tuple(keys),
- bool(schema_translate_map),
- inline,
- )
- compiled_sql = compiled_cache.get(key)
-
- if compiled_sql is None:
- compiled_sql = elem.compile(
- dialect=dialect,
- cache_key=elem_cache_key,
- column_keys=keys,
- inline=inline,
- schema_translate_map=schema_translate_map,
- linting=self.dialect.compiler_linting
- | compiler.WARN_LINTING,
- )
- compiled_cache[key] = compiled_sql
- else:
- extracted_params = None
- compiled_sql = elem.compile(
- dialect=dialect,
- column_keys=keys,
- inline=inline,
- schema_translate_map=schema_translate_map,
- linting=self.dialect.compiler_linting | compiler.WARN_LINTING,
- )
-
+ compiled_sql, extracted_params, cache_hit = elem._compile_w_cache(
+ dialect=dialect,
+ compiled_cache=compiled_cache,
+ column_keys=keys,
+ inline=inline,
+ schema_translate_map=schema_translate_map,
+ linting=self.dialect.compiler_linting | compiler.WARN_LINTING,
+ )
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
distilled_params,
elem,
extracted_params,
+ cache_hit=cache_hit,
)
if has_events:
self.dispatch.after_execute(
statement,
parameters,
execution_options,
- *args
+ *args,
+ **kw
):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`_engine.CursorResult`."""
conn = self._revalidate_connection()
context = constructor(
- dialect, self, conn, execution_options, *args
+ dialect, self, conn, execution_options, *args, **kw
)
except (exc.PendingRollbackError, exc.ResourceClosedError):
raise
self.engine.logger.info(statement)
- # stats = context._get_cache_stats()
+ stats = context._get_cache_stats()
if not self.engine.hide_parameters:
- # TODO: I love the stats but a ton of tests that are hardcoded.
- # to certain log output are failing.
self.engine.logger.info(
- "%r",
+ "[%s] %r",
+ stats,
sql_util._repr_params(
parameters, batches=10, ismulti=context.executemany
),
)
- # self.engine.logger.info(
- # "[%s] %r",
- # stats,
- # sql_util._repr_params(
- # parameters, batches=10, ismulti=context.executemany
- # ),
- # )
else:
self.engine.logger.info(
- "[SQL parameters hidden due to hide_parameters=True]"
+ "[%s] [SQL parameters hidden due to hide_parameters=True]"
+ % (stats,)
)
- # self.engine.logger.info(
- # "[%s] [SQL parameters hidden due to hide_parameters=True]"
- # % (stats,)
- # )
evt_handled = False
try:
url,
logging_name=None,
echo=None,
+ query_cache_size=500,
execution_options=None,
hide_parameters=False,
):
self.logging_name = logging_name
self.echo = echo
self.hide_parameters = hide_parameters
+ if query_cache_size != 0:
+ self._compiled_cache = util.LRUCache(
+ query_cache_size, size_alert=self._lru_size_alert
+ )
+ else:
+ self._compiled_cache = None
log.instance_logger(self, echoflag=echo)
if execution_options:
self.update_execution_options(**execution_options)
+ def _lru_size_alert(self, cache):
+ if self._should_log_info:
+ self.logger.info(
+ "Compiled cache size pruning from %d items to %d. "
+ "Increase cache size to reduce the frequency of pruning.",
+ len(cache),
+ cache.capacity,
+ )
+
@property
def engine(self):
return self
+ def clear_compiled_cache(self):
+ """Clear the compiled cache associated with the dialect.
+
+ This applies **only** to the built-in cache that is established
+ via the :paramref:`.create_engine.query_cache_size` parameter.
+ It will not impact any dictionary caches that were passed via the
+ :paramref:`.Connection.execution_options.compiled_cache` parameter.
+
+ .. versionadded:: 1.4
+
+ """
+ if self._compiled_cache:
+ self._compiled_cache.clear()
+
def update_execution_options(self, **opt):
r"""Update the default execution_options dictionary
of this :class:`_engine.Engine`.
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
+ self._compiled_cache = proxied._compiled_cache
self.hide_parameters = proxied.hide_parameters
log.instance_logger(self, echoflag=self.echo)
.. versionadded:: 1.2.3
:param query_cache_size: size of the cache used to cache the SQL string
- form of queries. Defaults to zero, which disables caching.
+ form of queries. Set to zero to disable caching.
+
+ The cache is pruned of its least recently used items when its size reaches
+ 1.5 times the configured size. Defaults to 500, meaning the cache will
+ always store at least 500 SQL statements when filled, and will grow up to
+ 750 items, at which point it is pruned back down to 500 by removing the
+ 250 least recently used items.
Caching is accomplished on a per-statement basis by generating a
cache key that represents the statement's structure, then generating
bypass the cache. SQL logging will indicate statistics for each
statement whether or not it was pulled from the cache.
+ .. note:: Some ORM functions related to unit-of-work persistence, as well
+ as some attribute loading strategies, will make use of individual
+ per-mapper caches outside of the main cache.
+
+
.. seealso::
``engine_caching`` - TODO: this will be an upcoming section describing
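A rough usage sketch of the caching hooks documented above (sizes are arbitrary; ``compiled_cache`` is the per-connection execution option read by ``_execute_clauseelement`` earlier in this patch)::

    from sqlalchemy import create_engine, text

    # engine-wide LRU cache; pruning begins at roughly 1.5x this size
    engine = create_engine("sqlite://", query_cache_size=1200)

    with engine.connect() as conn:
        conn.execute(text("SELECT 1"))

    # empty the built-in cache; dictionaries passed via
    # Connection.execution_options(compiled_cache=...) are unaffected
    engine.clear_compiled_cache()

    # supply a private dictionary cache for a single connection
    my_cache = {}
    with engine.connect() as conn:
        conn = conn.execution_options(compiled_cache=my_cache)
        conn.execute(text("SELECT 1"))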
"_keys",
"_tuplefilter",
"_translated_indexes",
+ "_safe_for_cache"
# don't need _unique_filters support here for now. Can be added
# if a need arises.
)
return new_metadata
def _adapt_to_context(self, context):
- """When using a cached result metadata against a new context,
- we need to rewrite the _keymap so that it has the specific
- Column objects in the new context inside of it. this accommodates
- for select() constructs that contain anonymized columns and
- are cached.
+ """When using a cached Compiled construct that has a _result_map,
+ for a new statement that used the cached Compiled, we need to ensure
+ the keymap has the Column objects from our new statement as keys.
+ So here we rewrite keymap with new entries for the new columns
+ as matched to those of the cached statement.
"""
if not context.compiled._result_columns:
# to the result map.
md = self.__class__.__new__(self.__class__)
- md._keymap = self._keymap.copy()
+ md._keymap = dict(self._keymap)
# match up new columns positionally to the result columns
for existing, new in zip(
context.compiled._result_columns,
invoked_statement._exported_columns_iterator(),
):
- md._keymap[new] = md._keymap[existing[RM_NAME]]
+ if existing[RM_NAME] in md._keymap:
+ md._keymap[new] = md._keymap[existing[RM_NAME]]
md.case_sensitive = self.case_sensitive
md._processors = self._processors
self._tuplefilter = None
self._translated_indexes = None
self.case_sensitive = dialect.case_sensitive
+ self._safe_for_cache = False
if context.result_column_struct:
(
self._keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
+
+ # this metadata is safe to cache because we are guaranteed
+ # to have the columns in the same order for new executions
+ self._safe_for_cache = True
return [
(
idx,
for idx, rmap_entry in enumerate(result_columns)
]
else:
+
# name-based or text-positional cases, where we need
# to read cursor.description names
+
if textual_ordered:
+ self._safe_for_cache = True
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
+ # the order of columns can change if the query is
+ # against a "select *", so not safe to cache
+ self._safe_for_cache = False
raw_iterator = self._merge_cols_by_name(
context,
cursor_description,
loose_column_name_matching,
)
else:
- # no compiled SQL, just a raw string
+ # no compiled SQL, just a raw string, order of columns
+ # can change for "select *"
+ self._safe_for_cache = False
raw_iterator = self._merge_cols_by_none(
context, cursor_description
)
out_parameters = None
_metadata = None
- _metadata_from_cache = False
_soft_closed = False
closed = False
def _init_metadata(self, context, cursor_description):
if context.compiled:
if context.compiled._cached_metadata:
- cached_md = self.context.compiled._cached_metadata
- self._metadata_from_cache = True
-
- # result rewrite/ adapt step. two translations can occur here.
- # one is if we are invoked against a cached statement, we want
- # to rewrite the ResultMetaData to reflect the column objects
- # that are in our current selectable, not the cached one. the
- # other is, the CompileState can return an alternative Result
- # object. Finally, CompileState might want to tell us to not
- # actually do the ResultMetaData adapt step if it in fact has
- # changed the selected columns in any case.
- compiled = context.compiled
- if (
- compiled
- and not compiled._rewrites_selected_columns
- and compiled.statement is not context.invoked_statement
- ):
- cached_md = cached_md._adapt_to_context(context)
-
- self._metadata = metadata = cached_md
-
+ metadata = self.context.compiled._cached_metadata
else:
- self._metadata = (
- metadata
- ) = context.compiled._cached_metadata = self._cursor_metadata(
- self, cursor_description
- )
+ metadata = self._cursor_metadata(self, cursor_description)
+ if metadata._safe_for_cache:
+ context.compiled._cached_metadata = metadata
+
+ # result rewrite / adapt step.  This handles the case where we are
+ # invoked against a cached Compiled object: we want to rewrite the
+ # ResultMetaData to reflect the Column objects that are in our current
+ # SQL statement object, not the one that is associated with the cached
+ # Compiled object.
+ # The Compiled object may also tell us not to do this step; this is to
+ # support the ORM, which will produce a new Result object in any case
+ # and will be using the cached Column objects against this database
+ # result, so we don't want to rewrite them.
+ #
+ # Basically this step suits the use case where the end user
+ # is using Core SQL expressions and is accessing columns in the
+ # result row using row._mapping[table.c.column].
+ compiled = context.compiled
+ if (
+ compiled
+ and compiled._result_columns
+ and context.cache_hit
+ and not compiled._rewrites_selected_columns
+ and compiled.statement is not context.invoked_statement
+ ):
+ metadata = metadata._adapt_to_context(context)
+
+ self._metadata = metadata
+
else:
self._metadata = metadata = self._cursor_metadata(
self, cursor_description
supports_native_boolean=None,
max_identifier_length=None,
label_length=None,
- query_cache_size=0,
# int() is because the @deprecated_params decorator cannot accommodate
# the direct reference to the "NO_LINTING" object
compiler_linting=int(compiler.NO_LINTING),
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
- if query_cache_size != 0:
- self._compiled_cache = util.LRUCache(query_cache_size)
- else:
- self._compiled_cache = None
self._user_defined_max_identifier_length = max_identifier_length
if self._user_defined_max_identifier_length:
parameters,
invoked_statement,
extracted_parameters,
+ cache_hit=False,
):
"""Initialize execution context for a Compiled construct."""
self.extracted_parameters = extracted_parameters
self.invoked_statement = invoked_statement
self.compiled = compiled
+ self.cache_hit = cache_hit
self.execution_options = execution_options
def _get_cache_stats(self):
if self.compiled is None:
- return "raw SQL"
+ return "raw sql"
now = time.time()
if self.compiled.cache_key is None:
- return "gen %.5fs" % (now - self.compiled._gen_time,)
+ return "no key %.5fs" % (now - self.compiled._gen_time,)
+ elif self.cache_hit:
+ return "cached for %.4gs" % (now - self.compiled._gen_time,)
else:
- return "cached %.5fs" % (now - self.compiled._gen_time,)
+ return "generated in %.5fs" % (now - self.compiled._gen_time,)
@util.memoized_property
def engine(self):
result = self.session.execute(
statement, params, execution_options=execution_options
)
-
if result._attributes.get("is_single_entity", False):
result = result.scalars()
_is_future = True
_setup_joins = ()
_legacy_setup_joins = ()
+ inherit_cache = True
@classmethod
def _create_select(cls, *entities):
self,
class_,
key,
+ parententity,
impl=None,
comparator=None,
- parententity=None,
of_type=None,
):
self.class_ = class_
self.key = key
+ self._parententity = parententity
self.impl = impl
self.comparator = comparator
- self._parententity = parententity
self._of_type = of_type
manager = manager_of_class(class_)
@util.memoized_property
def expression(self):
return self.comparator.__clause_element__()._annotate(
- {"orm_key": self.key}
+ {"orm_key": self.key, "entity_namespace": self._entity_namespace}
)
+ @property
+ def _entity_namespace(self):
+ return self._parententity
+
@property
def _annotations(self):
return self.__clause_element__()._annotations
return QueryableAttribute(
self.class_,
self.key,
- self.impl,
- self.comparator.of_type(entity),
self._parententity,
+ impl=self.impl,
+ comparator=self.comparator.of_type(entity),
of_type=inspection.inspect(entity),
)
"""
+ inherit_cache = True
+
def __set__(self, instance, value):
self.impl.set(
instance_state(instance), instance_dict(instance), value, None
return self.impl.get(instance_state(instance), dict_)
+HasEntityNamespace = util.namedtuple(
+ "HasEntityNamespace", ["entity_namespace"]
+)
+
+
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
and getattr(self.class_, self.key).impl.uses_objects
)
+ @property
+ def _entity_namespace(self):
+ if hasattr(self._comparator, "_parententity"):
+ return self._comparator._parententity
+ else:
+ # used by hybrid attributes which try to remain
+ # agnostic of any ORM concepts like mappers
+ return HasEntityNamespace(self.class_)
+
@property
def property(self):
return self.comparator.property
"post_load_paths",
"identity_token",
"yield_per",
+ "loaders_require_buffering",
+ "loaders_require_uniquing",
)
class default_load_options(Options):
def __init__(
self,
compile_state,
+ statement,
session,
load_options,
execution_options=None,
bind_arguments=None,
):
-
self.load_options = load_options
self.execution_options = execution_options or _EMPTY_DICT
self.bind_arguments = bind_arguments or _EMPTY_DICT
self.compile_state = compile_state
- self.query = query = compile_state.select_statement
+ self.query = statement
self.session = session
+ self.loaders_require_buffering = False
+ self.loaders_require_uniquing = False
self.propagated_loader_options = {
- o for o in query._with_options if o.propagate_to_loaders
+ o for o in statement._with_options if o.propagate_to_loaders
}
self.attributes = dict(compile_state.attributes)
)
querycontext = QueryContext(
compile_state,
+ statement,
session,
load_options,
execution_options,
_has_orm_entities = False
multi_row_eager_loaders = False
compound_eager_adapter = None
- loaders_require_buffering = False
- loaders_require_uniquing = False
@classmethod
def create_for_statement(cls, statement_container, compiler, **kw):
_has_orm_entities = False
multi_row_eager_loaders = False
compound_eager_adapter = None
- loaders_require_buffering = False
- loaders_require_uniquing = False
correlate = None
_where_criteria = ()
self = cls.__new__(cls)
- self.select_statement = select_statement
+ if select_statement._execution_options:
+ # execution options should not impact the compilation of a
+ # query, and at the moment subqueryloader is putting some things
+ # in here that we explicitly don't want stuck in a cache.
+ self.select_statement = select_statement._clone()
+ self.select_statement._execution_options = util.immutabledict()
+ else:
+ self.select_statement = select_statement
# indicates this select() came from Query.statement
self.for_statement = (
)
self._setup_with_polymorphics()
+ # entities will also set up polymorphic adapters for mappers
+ # that have with_polymorphic configured
_QueryEntity.to_compile_state(self, query._raw_columns)
return self
self._where_criteria += (single_crit,)
-def _column_descriptions(query_or_select_stmt):
- ctx = ORMSelectCompileState._create_entities_collection(
- query_or_select_stmt
- )
+def _column_descriptions(query_or_select_stmt, compile_state=None):
+ if compile_state is None:
+ compile_state = ORMSelectCompileState._create_entities_collection(
+ query_or_select_stmt
+ )
+ ctx = compile_state
return [
{
"name": ent._label_name,
only_load_props = refresh_state = None
_instance = loading._instance_processor(
+ self,
self.mapper,
context,
result,
def expression(self):
clauses = self.clauses._annotate(
{
- "bundle": True,
"parententity": self._parententity,
"parentmapper": self._parententity,
"orm_key": self.prop.key,
"""
def create_row_processor(
- self, context, path, mapper, result, adapter, populators
+ self, context, query_entity, path, mapper, result, adapter, populators
):
"""Produce row processing functions and append to the given
set of populators lists.
"_wildcard_token",
"_default_path_loader_key",
)
-
+ inherit_cache = True
strategy_wildcard_key = None
def _memoized_attr__wildcard_token(self):
)
def create_row_processor(
- self, context, path, mapper, result, adapter, populators
+ self, context, query_entity, path, mapper, result, adapter, populators
):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
else:
strat = self.strategy
strat.create_row_processor(
- context, path, loader, mapper, result, adapter, populators
+ context,
+ query_entity,
+ path,
+ loader,
+ mapper,
+ result,
+ adapter,
+ populators,
)
def do_init(self):
)
-class ORMOption(object):
+class ORMOption(HasCacheKey):
"""Base class for option objects that are passed to ORM queries.
These options may be consumed by :meth:`.Query.options`,
_is_compile_state = False
-class LoaderOption(HasCacheKey, ORMOption):
+class LoaderOption(ORMOption):
"""Describe a loader modification to an ORM statement at compilation time.
.. versionadded:: 1.4
def __init__(self, payload=None):
self.payload = payload
- def _gen_cache_key(self, *arg, **kw):
- return ()
-
@util.deprecated_cls(
"1.4",
"""
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
"""Establish row processing functions for a given QueryContext.
)
if context.yield_per and (
- context.compile_state.loaders_require_buffering
- or context.compile_state.loaders_require_uniquing
+ context.loaders_require_buffering
+ or context.loaders_require_uniquing
):
raise sa_exc.InvalidRequestError(
"Can't use yield_per with eager loaders that require uniquing "
def _instance_processor(
+ query_entity,
mapper,
context,
result,
# to see if one fits
prop.create_row_processor(
context,
+ query_entity,
path,
mapper,
result,
populators = {key: list(value) for key, value in cached_populators.items()}
for prop in getters["todo"]:
prop.create_row_processor(
- context, path, mapper, result, adapter, populators
+ context, query_entity, path, mapper, result, adapter, populators
)
propagated_loader_options = context.propagated_loader_options
_instance = _decorate_polymorphic_switch(
_instance,
context,
+ query_entity,
mapper,
result,
path,
def _decorate_polymorphic_switch(
instance_fn,
context,
+ query_entity,
mapper,
result,
path,
return False
return _instance_processor(
+ query_entity,
sub_mapper,
context,
result,
return self
_cache_key_traversal = [
- ("class_", visitors.ExtendedInternalTraversal.dp_plain_obj)
+ ("mapper", visitors.ExtendedInternalTraversal.dp_plain_obj),
]
@property
"""
+ inherit_cache = True
+
path = natural_path = ()
has_entity = False
is_aliased_class = False
class TokenRegistry(PathRegistry):
__slots__ = ("token", "parent", "path", "natural_path")
+ inherit_cache = True
+
def __init__(self, parent, token):
token = PathToken.intern(token)
class PropRegistry(PathRegistry):
is_unnatural = False
+ inherit_cache = True
def __init__(self, parent, prop):
# restate this path in terms of the
class SlotsEntityRegistry(AbstractEntityRegistry):
# for aliased class, return lightweight, no-cycles created
# version
+ inherit_cache = True
__slots__ = (
"key",
# for long lived mapper, return dict based caching
# version that creates reference cycles
+ inherit_cache = True
+
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
from ..sql.dml import DeleteDMLState
from ..sql.dml import UpdateDMLState
from ..sql.elements import BooleanClauseList
+from ..sql.util import _entity_namespace_key
def _bulk_insert(
if isinstance(k, util.string_types):
desc = sql.util._entity_namespace_key(mapper, k)
values.extend(desc._bulk_update_tuples(v))
- elif isinstance(k, attributes.QueryableAttribute):
- values.extend(k._bulk_update_tuples(v))
+ elif "entity_namespace" in k._annotations:
+ k_anno = k._annotations
+ attr = _entity_namespace_key(
+ k_anno["entity_namespace"], k_anno["orm_key"]
+ )
+ values.extend(attr._bulk_update_tuples(v))
else:
values.append((k, v))
else:
"""
strategy_wildcard_key = "column"
+ inherit_cache = True
__slots__ = (
"_orig_columns",
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..sql.selectable import SelectStatementGrouping
from ..sql.util import _entity_namespace_key
+from ..sql.visitors import InternalTraversal
from ..util import collections_abc
__all__ = ["Query", "QueryContext", "aliased"]
_label_style=self._label_style,
compile_options=compile_options,
)
+ stmt.__dict__.pop("session", None)
stmt._propagate_attrs = self._propagate_attrs
return stmt
"""
from_entity = self._filter_by_zero()
-
if from_entity is None:
raise sa_exc.InvalidRequestError(
"Can't use filter_by when the first entity '%s' of a query "
compile_state = self._compile_state(for_statement=False)
context = QueryContext(
- compile_state, self.session, self.load_options
+ compile_state,
+ compile_state.statement,
+ self.session,
+ self.load_options,
)
result = loading.instances(result_proxy, context)
def _compile_context(self, for_statement=False):
compile_state = self._compile_state(for_statement=for_statement)
- context = QueryContext(compile_state, self.session, self.load_options)
+ context = QueryContext(
+ compile_state,
+ compile_state.statement,
+ self.session,
+ self.load_options,
+ )
return context
_for_update_arg = None
+ _traverse_internals = [
+ ("_raw_columns", InternalTraversal.dp_clauseelement_list),
+ ("element", InternalTraversal.dp_clauseelement),
+ ] + Executable._executable_traverse_internals
+
def __init__(self, entities, element):
self._raw_columns = [
coercions.expect(
"""
strategy_wildcard_key = "relationship"
+ inherit_cache = True
_persistence_only = dict(
passive_deletes=False,
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .context import _column_descriptions
+from .context import ORMCompileState
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
column_collection.append(c)
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
pass
)
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
memoized_populators[self.parent_property] = fetch
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
self.group = self.parent_property.group
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
# for a DeferredColumnLoader, this method is only used during a
)
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
def invoke_no_load(state, dict_, row):
if self.uselist:
return None
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
key = self.key
"""A relationship loader that emits a second SELECT statement."""
def _immediateload_create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
return self.parent_property._get_strategy(
(("lazy", "immediate"),)
).create_row_processor(
- context, path, loadopt, mapper, result, adapter, populators
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
)
(("lazy", "select"),)
).init_class_attribute(mapper)
- def setup_query(
+ def create_row_processor(
self,
- compile_state,
- entity,
+ context,
+ query_entity,
path,
loadopt,
+ mapper,
+ result,
adapter,
- column_collection=None,
- parentmapper=None,
- **kwargs
- ):
- pass
-
- def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ populators,
):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
(("lazy", "select"),)
).init_class_attribute(mapper)
- def setup_query(
- self,
- compile_state,
- entity,
- path,
- loadopt,
- adapter,
- column_collection=None,
- parentmapper=None,
- **kwargs
- ):
- if (
- not compile_state.compile_options._enable_eagerloads
- or compile_state.compile_options._for_refresh_state
- ):
- return
-
- compile_state.loaders_require_buffering = True
-
- path = path[self.parent_property]
-
- # build up a path indicating the path from the leftmost
- # entity to the thing we're subquery loading.
- with_poly_entity = path.get(
- compile_state.attributes, "path_with_polymorphic", None
- )
- if with_poly_entity is not None:
- effective_entity = with_poly_entity
- else:
- effective_entity = self.entity
-
- subq_path = compile_state.attributes.get(
- ("subquery_path", None), orm_util.PathRegistry.root
- )
-
- subq_path = subq_path + path
-
- # if not via query option, check for
- # a cycle
- if not path.contains(compile_state.attributes, "loader"):
- if self.join_depth:
- if (
- (
- compile_state.current_path.length
- if compile_state.current_path
- else 0
- )
- + path.length
- ) / 2 > self.join_depth:
- return
- elif subq_path.contains_mapper(self.mapper):
- return
-
- (
- leftmost_mapper,
- leftmost_attr,
- leftmost_relationship,
- ) = self._get_leftmost(subq_path)
-
- orig_query = compile_state.attributes.get(
- ("orig_query", SubqueryLoader), compile_state.select_statement
- )
-
- # generate a new Query from the original, then
- # produce a subquery from it.
- left_alias = self._generate_from_original_query(
- compile_state,
- orig_query,
- leftmost_mapper,
- leftmost_attr,
- leftmost_relationship,
- entity.entity_zero,
- )
-
- # generate another Query that will join the
- # left alias to the target relationships.
- # basically doing a longhand
- # "from_self()". (from_self() itself not quite industrial
- # strength enough for all contingencies...but very close)
-
- q = query.Query(effective_entity)
-
- def set_state_options(compile_state):
- compile_state.attributes.update(
- {
- ("orig_query", SubqueryLoader): orig_query,
- ("subquery_path", None): subq_path,
- }
- )
-
- q = q._add_context_option(set_state_options, None)._disable_caching()
-
- q = q._set_enable_single_crit(False)
- to_join, local_attr, parent_alias = self._prep_for_joins(
- left_alias, subq_path
- )
-
- q = q.add_columns(*local_attr)
- q = self._apply_joins(
- q, to_join, left_alias, parent_alias, effective_entity
- )
-
- q = self._setup_options(q, subq_path, orig_query, effective_entity)
- q = self._setup_outermost_orderby(q)
-
- # add new query to attributes to be picked up
- # by create_row_processor
- # NOTE: be sure to consult baked.py for some hardcoded logic
- # about this structure as well
- assert q.session is None
- path.set(
- compile_state.attributes, "subqueryload_data", {"query": q},
- )
-
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
q,
*{
ent["entity"]
- for ent in _column_descriptions(orig_query)
+ for ent in _column_descriptions(
+ orig_query, compile_state=orig_compile_state
+ )
if ent["entity"] is not None
}
)
- # for column information, look to the compile state that is
- # already being passed through
- compile_state = orig_compile_state
-
# select from the identity columns of the outer (specifically, these
- # are the 'local_cols' of the property). This will remove
- # other columns from the query that might suggest the right entity
- # which is why we do _set_select_from above.
- target_cols = compile_state._adapt_col_list(
+ # are the 'local_cols' of the property). This will remove other
+ # columns from the query that might suggest the right entity, which is
+ # why we do set select_from above. The attributes we have are
+ # coerced and adapted using the original query's adapter, which is
+ # needed only for the case of adapting a subclass column to
+ # that of a polymorphic selectable, e.g. we have
+ # Engineer.primary_language and the entity is Person. All other
+ # adaptations, e.g. from_self, select_entity_from(), will occur
+ # within the new query when it compiles, as the compile_state we are
+ # using here is only a partial one. If the subqueryload is from a
+ # with_polymorphic() or other aliased() object, left_attr will already
+ # be the correct attributes so no adaptation is needed.
+ target_cols = orig_compile_state._adapt_col_list(
[
- sql.coercions.expect(sql.roles.ByOfRole, o)
+ sql.coercions.expect(sql.roles.ColumnsClauseRole, o)
for o in leftmost_attr
],
- compile_state._get_current_adapter(),
+ orig_compile_state._get_current_adapter(),
)
- q._set_entities(target_cols)
+ q._raw_columns = target_cols
distinct_target_key = leftmost_relationship.distinct_target_key
"_data",
)
- def __init__(self, context, subq_info):
+ def __init__(self, context, subq):
# avoid creating a cycle by storing context
# even though that's preferable
self.session = context.session
self.execution_options = context.execution_options
self.load_options = context.load_options
- self.subq = subq_info["query"]
+ self.subq = subq
self._data = None
def get(self, key, default):
if self._data is None:
self._load()
+ def _setup_query_from_rowproc(
+ self, context, path, entity, loadopt, adapter,
+ ):
+ compile_state = context.compile_state
+ if (
+ not compile_state.compile_options._enable_eagerloads
+ or compile_state.compile_options._for_refresh_state
+ ):
+ return
+
+ context.loaders_require_buffering = True
+
+ path = path[self.parent_property]
+
+ # build up a path indicating the path from the leftmost
+ # entity to the thing we're subquery loading.
+ with_poly_entity = path.get(
+ compile_state.attributes, "path_with_polymorphic", None
+ )
+ if with_poly_entity is not None:
+ effective_entity = with_poly_entity
+ else:
+ effective_entity = self.entity
+
+ subq_path = context.query._execution_options.get(
+ ("subquery_path", None), orm_util.PathRegistry.root
+ )
+
+ subq_path = subq_path + path
+
+ # if not via query option, check for
+ # a cycle
+ if not path.contains(compile_state.attributes, "loader"):
+ if self.join_depth:
+ if (
+ (
+ compile_state.current_path.length
+ if compile_state.current_path
+ else 0
+ )
+ + path.length
+ ) / 2 > self.join_depth:
+ return
+ elif subq_path.contains_mapper(self.mapper):
+ return
+
+ (
+ leftmost_mapper,
+ leftmost_attr,
+ leftmost_relationship,
+ ) = self._get_leftmost(subq_path)
+
+ # use the current query being invoked, not the compile state
+ # one. this is so that we get the current parameters. however,
+ # it means we can't use the existing compile state; we have to make
+ # a new one. other approaches include possibly using the
+ # compiled query but swapping the params; that seems to save only
+ # marginal time while being more complicated
+ orig_query = context.query._execution_options.get(
+ ("orig_query", SubqueryLoader), context.query
+ )
+
+ # make a new compile_state for the query that's probably cached, but
+ # we're sort of undoing a bit of that caching :(
+ compile_state_cls = ORMCompileState._get_plugin_class_for_plugin(
+ orig_query, "orm"
+ )
+
+ # this would create the full blown compile state, which we don't
+ # need
+ # orig_compile_state = compile_state_cls.create_for_statement(
+ # orig_query, None)
+
+ # this is the more "quick" version, however it's not clear how
+ # much of this we need. in particular I can't get a test to
+ # fail if the "set_base_alias" is missing; not sure why that is.
+ orig_compile_state = compile_state_cls._create_entities_collection(
+ orig_query
+ )
+
+ # generate a new Query from the original, then
+ # produce a subquery from it.
+ left_alias = self._generate_from_original_query(
+ orig_compile_state,
+ orig_query,
+ leftmost_mapper,
+ leftmost_attr,
+ leftmost_relationship,
+ entity,
+ )
+
+ # generate another Query that will join the
+ # left alias to the target relationships.
+ # basically doing a longhand
+ # "from_self()". (from_self() itself not quite industrial
+ # strength enough for all contingencies...but very close)
+
+ q = query.Query(effective_entity)
+
+ q._execution_options = q._execution_options.union(
+ {
+ ("orig_query", SubqueryLoader): orig_query,
+ ("subquery_path", None): subq_path,
+ }
+ )
+
+ q = q._set_enable_single_crit(False)
+ to_join, local_attr, parent_alias = self._prep_for_joins(
+ left_alias, subq_path
+ )
+
+ q = q.add_columns(*local_attr)
+ q = self._apply_joins(
+ q, to_join, left_alias, parent_alias, effective_entity
+ )
+
+ q = self._setup_options(q, subq_path, orig_query, effective_entity)
+ q = self._setup_outermost_orderby(q)
+
+ return q
+
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
- context, path, loadopt, mapper, result, adapter, populators
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
)
if not self.parent.class_manager[self.key].impl.supports_population:
"population - eager loading cannot be applied." % self
)
- path = path[self.parent_property]
+ # a little dance here as the "path" is still something that only
+ # semi-tracks the exact series of things we are loading, and still does
+ # not tell us about with_polymorphic() and similar constructs when it's
+ # at the root; the initial MapperEntity is more accurate for this case.
+ if len(path) == 1:
+ if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
+ return
+ elif not orm_util._entity_isa(path[-1], self.parent):
+ return
- subq_info = path.get(context.attributes, "subqueryload_data")
+ subq = self._setup_query_from_rowproc(
+ context, path, path[-1], loadopt, adapter,
+ )
- if subq_info is None:
+ if subq is None:
return
- subq = subq_info["query"]
-
assert subq.session is None
+
+ path = path[self.parent_property]
+
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
- collections = self._SubqCollections(context, subq_info)
+ collections = self._SubqCollections(context, subq)
path.set(context.attributes, "collections", collections)
if adapter:
if not compile_state.compile_options._enable_eagerloads:
return
elif self.uselist:
- compile_state.loaders_require_uniquing = True
compile_state.multi_row_eager_loaders = True
path = path[self.parent_property]
return False
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"population - eager loading cannot be applied." % self
)
+ if self.uselist:
+ context.loaders_require_uniquing = True
+
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
key = self.key
_instance = loading._instance_processor(
+ query_entity,
self.mapper,
context,
result,
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
- context, path, loadopt, mapper, result, adapter, populators
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
)
def _create_collection_loader(self, context, key, _instance, populators):
return util.preloaded.ext_baked.bakery(size=50)
def create_row_processor(
- self, context, path, loadopt, mapper, result, adapter, populators
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
- context, path, loadopt, mapper, result, adapter, populators
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
)
if not self.parent.class_manager[self.key].impl.supports_population:
"population - eager loading cannot be applied." % self
)
+ # a little dance here as the "path" is still something that only
+ # semi-tracks the exact series of things we are loading, and still does
+ # not tell us about with_polymorphic() and similar constructs when it's
+ # at the root; the initial MapperEntity is more accurate for this case.
+ if len(path) == 1:
+ if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
+ return
+ elif not orm_util._entity_isa(path[-1], self.parent):
+ return
+
selectin_path = (
context.compile_state.current_path or orm_util.PathRegistry.root
) + path
- if not orm_util._entity_isa(path[-1], self.parent):
- return
-
if loading.PostLoad.path_exists(
context, selectin_path, self.parent_property
):
return
elif selectin_path_w_prop.contains_mapper(self.mapper):
return
-
loading.PostLoad.callable_for_path(
context,
selectin_path,
)
)
- orig_query = context.query
+ # a test which exercises what these comments talk about is
+ # test_selectin_relations.py -> test_twolevel_selectin_w_polymorphic
+ #
+ # effective_entity above is given to us in terms of the cached
+ # statement, namely this one:
+ orig_query = context.compile_state.select_statement
+
+ # the actual statement that was requested is this one:
+ # context_query = context.query
+ #
+ # that's not the cached one, however. So while it is of the identical
+ # structure, if it has entities like AliasedInsp, which we get from
+ # aliased() or with_polymorphic(), the AliasedInsp will likely be a
+ # different object identity each time, and will not match up
+ # hashing-wise to the corresponding AliasedInsp that's in the
+ # cached query, meaning it won't match on paths and loader lookups
+ # and loaders like this one will be skipped if it is used in options.
+ #
+ # Now we want to transfer loader options from the parent query to the
+ # "selectinload" query we're about to run. Which query do we transfer
+ # the options from? We use the cached query, because the options in
+ # that query will be in terms of the effective entity we were just
+ # handed.
+ #
+ # But now the selectinload / baked query we are running is *also*
+ # cached. What if it's cached and running from some previous iteration
+ # of that AliasedInsp? Well in that case it will also use the previous
+ # iteration of the loader options. If the baked query expires and
+ # gets generated again, it will be handed the current effective_entity
+ # and the current _with_options, again in terms of whatever
+ # compile_state.select_statement happens to be right now, so the
+ # query will still be internally consistent and loader callables
+ # will be correctly invoked.
q._add_lazyload_options(
orig_query._with_options, path[self.parent_property]
return cloned
def __clause_element__(self):
- annotations = self._annotations.union(
- {"bundle": self, "entity_namespace": self}
- )
+ # ensure existing entity_namespace remains
+ annotations = {"bundle": self, "entity_namespace": self}
+ annotations.update(self._annotations)
return expression.ClauseList(
_literal_as_text_role=roles.ColumnsClauseRole,
group=False,
__visit_name__ = expression.Join.__visit_name__
+ inherit_cache = True
+
def __init__(
self,
left,
from .elements import AnnotatedColumnElement
from .elements import ClauseList # noqa
from .selectable import AnnotatedFromClause # noqa
+ from .traversals import _preconfigure_traversals
from . import base
from . import coercions
_prepare_annotations(FromClause, AnnotatedFromClause)
_prepare_annotations(ClauseList, Annotated)
+ _preconfigure_traversals(ClauseElement)
+
_sa_util.preloaded.import_prefix("sqlalchemy.sql")
from . import naming # noqa
anno_cls._traverse_internals = list(cls._traverse_internals) + [
("_annotations", InternalTraversal.dp_annotations_key)
]
+ elif cls.__dict__.get("inherit_cache", False):
+ anno_cls._traverse_internals = list(cls._traverse_internals) + [
+ ("_annotations", InternalTraversal.dp_annotations_key)
+ ]
+
+ # some classes include this even if they have traverse_internals,
+ # e.g. BindParameter; add it to the annotated class if present.
+ if cls.__dict__.get("inherit_cache", False):
+ anno_cls.inherit_cache = True
anno_cls._is_column_operators = issubclass(cls, operators.ColumnOperators)
_bind = None
_with_options = ()
_with_context_options = ()
- _cache_enable = True
_executable_traverse_internals = [
("_with_options", ExtendedInternalTraversal.dp_has_cache_key_list),
("_with_context_options", ExtendedInternalTraversal.dp_plain_obj),
- ("_cache_enable", ExtendedInternalTraversal.dp_plain_obj),
+ ("_propagate_attrs", ExtendedInternalTraversal.dp_propagate_attrs),
]
- @_generative
- def _disable_caching(self):
- self._cache_enable = HasCacheKey()
-
@_generative
def options(self, *options):
"""Apply options to this statement.
_cached_metadata = None
+ _result_columns = None
+
schema_translate_map = None
execution_options = util.immutabledict()
self,
dialect,
statement,
- bind=None,
schema_translate_map=None,
render_schema_translate=False,
compile_kwargs=util.immutabledict(),
"""
self.dialect = dialect
- self.bind = bind
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.schema_translate_map = schema_translate_map
"""Return the bind params for this compiled object."""
return self.construct_params()
- def execute(self, *multiparams, **params):
- """Execute this compiled object."""
-
- e = self.bind
- if e is None:
- raise exc.UnboundExecutionError(
- "This Compiled object is not bound to any Engine "
- "or Connection.",
- code="2afi",
- )
- return e._execute_compiled(self, multiparams, params)
-
- def scalar(self, *multiparams, **params):
- """Execute this compiled object and return the result's
- scalar value."""
-
- return self.execute(*multiparams, **params).scalar()
-
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
insert_prefetch = update_prefetch = ()
+ _cache_key_bind_match = None
+ """a mapping that will relate the BindParameter object we compile
+ to those that are part of the extracted collection of parameters
+ in the cache key, if we were given a cache key.
+
+ """
+
def __init__(
self,
dialect,
self.cache_key = cache_key
+ if cache_key:
+ self._cache_key_bind_match = {b: b for b in cache_key[1]}
+
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, "_inline", False)
replace_context=err,
)
+ ckbm = self._cache_key_bind_match
resolved_extracted = {
- b.key: extracted
+ ckbm[b]: extracted
for b, extracted in zip(orig_extracted, extracted_parameters)
}
else:
else:
if resolved_extracted:
value_param = resolved_extracted.get(
- bindparam.key, bindparam
+ bindparam, bindparam
)
else:
value_param = bindparam
)
if resolved_extracted:
- value_param = resolved_extracted.get(
- bindparam.key, bindparam
- )
+ value_param = resolved_extracted.get(bindparam, bindparam)
else:
value_param = bindparam
)
self.binds[bindparam.key] = self.binds[name] = bindparam
+
+ # if we are given a cache key that we're going to match against,
+ # relate the bindparam here to one that is most likely present
+ # in the "extracted params" portion of the cache key. this is used
+ # to set up a positional mapping that is used to determine the
+ # correct parameters for a subsequent use of this compiled with
+ # a different set of parameter values. here, we account for
+ # parameters that may have been cloned both before and after the cache
+ # key was generated.
+ ckbm = self._cache_key_bind_match
+ if ckbm:
+ ckbm.update({bp: bindparam for bp in bindparam._cloned_set})
+
if bindparam.isoutparam:
self.has_out_parameters = True
return dialect.ddl_compiler(dialect, self, **kw)
+ def _compile_w_cache(self, *arg, **kw):
+ raise NotImplementedError()
+
class DDLElement(roles.DDLRole, Executable, _DDLCompiles):
"""Base class for DDL expression constructs.
if self._preserve_parameter_order:
arg = [
(
- k,
+ coercions.expect(roles.DMLColumnRole, k),
coercions.expect(
roles.ExpressionElementRole,
v,
self._ordered_values = arg
else:
arg = {
- k: coercions.expect(
+ coercions.expect(roles.DMLColumnRole, k): coercions.expect(
roles.ExpressionElementRole,
v,
type_=NullType(),
]
+ HasPrefixes._has_prefixes_traverse_internals
+ DialectKWArgs._dialect_kwargs_traverse_internals
+ + Executable._executable_traverse_internals
)
@ValuesBase._constructor_20_deprecations(
]
+ HasPrefixes._has_prefixes_traverse_internals
+ DialectKWArgs._dialect_kwargs_traverse_internals
+ + Executable._executable_traverse_internals
)
@ValuesBase._constructor_20_deprecations(
)
arg = [
(
- k,
+ coercions.expect(roles.DMLColumnRole, k),
coercions.expect(
roles.ExpressionElementRole,
v,
]
+ HasPrefixes._has_prefixes_traverse_internals
+ DialectKWArgs._dialect_kwargs_traverse_internals
+ + Executable._executable_traverse_internals
)
@ValuesBase._constructor_20_deprecations(
try:
traverse_internals = self._traverse_internals
except AttributeError:
+ # user-defined classes may not have a _traverse_internals
return
for attrname, obj, meth in _copy_internals.run_generated_dispatch(
try:
traverse_internals = self._traverse_internals
except AttributeError:
+ # user-defined classes may not have a _traverse_internals
return []
return itertools.chain.from_iterable(
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
- bind = self.bind
else:
dialect = default.StrCompileDialect()
- return self._compiler(dialect, bind=bind, **kw)
+
+ return self._compiler(dialect, **kw)
+
+ def _compile_w_cache(
+ self,
+ dialect,
+ compiled_cache=None,
+ column_keys=None,
+ inline=False,
+ schema_translate_map=None,
+ **kw
+ ):
+ if compiled_cache is not None:
+ elem_cache_key = self._generate_cache_key()
+ else:
+ elem_cache_key = None
+
+ cache_hit = False
+
+ if elem_cache_key:
+ cache_key, extracted_params = elem_cache_key
+ key = (
+ dialect,
+ cache_key,
+ tuple(column_keys),
+ bool(schema_translate_map),
+ inline,
+ )
+ compiled_sql = compiled_cache.get(key)
+
+ if compiled_sql is None:
+ compiled_sql = self._compiler(
+ dialect,
+ cache_key=elem_cache_key,
+ column_keys=column_keys,
+ inline=inline,
+ schema_translate_map=schema_translate_map,
+ **kw
+ )
+ compiled_cache[key] = compiled_sql
+ else:
+ cache_hit = True
+ else:
+ extracted_params = None
+ compiled_sql = self._compiler(
+ dialect,
+ cache_key=elem_cache_key,
+ column_keys=column_keys,
+ inline=inline,
+ schema_translate_map=schema_translate_map,
+ **kw
+ )
+
+ return compiled_sql, extracted_params, cache_hit
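# Illustrative sketch, not part of this patch: the (key, extracted
# parameters) structure unpacked above is what makes the compiled cache
# reusable.  Statements that differ only in bound literal values should
# produce the same structural key; the literal values travel separately in
# the extracted parameters.
from sqlalchemy.sql import column, table

t = table("t", column("x"))

key1, params1 = t.select().where(t.c.x == 1)._generate_cache_key()
key2, params2 = t.select().where(t.c.x == 2)._generate_cache_key()

assert key1 == key2                       # same structure -> same cache key
assert [p.value for p in params1] == [1]  # values live only in the
assert [p.value for p in params2] == [2]  # extracted parameters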
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
_is_bind_parameter = True
_key_is_anon = False
+ # bindparam implements its own _gen_cache_key() method; however,
+ # we check subclasses for this flag, else no cache key is generated
+ inherit_cache = True
+
def __init__(
self,
key,
return c
def _gen_cache_key(self, anon_map, bindparams):
+ _gen_cache_ok = self.__class__.__dict__.get("inherit_cache", False)
+
+ if not _gen_cache_ok:
+ if anon_map is not None:
+ anon_map[NO_CACHE] = True
+ return None
+
idself = id(self)
if idself in anon_map:
return (anon_map[idself], self.__class__)
roles.InElementRole,
roles.OrderByRole,
roles.ColumnsClauseRole,
+ roles.DMLColumnRole,
ClauseElement,
):
"""Describe a list of clauses, separated by an operator.
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = "clauselist"
+ inherit_cache = True
_tuple_values = False
class AsBoolean(WrapsColumnExpression, UnaryExpression):
+ inherit_cache = True
+
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
("operator", InternalTraversal.dp_operator),
("negate", InternalTraversal.dp_operator),
("modifiers", InternalTraversal.dp_plain_dict),
+ ("type", InternalTraversal.dp_type,), # affects JSON CAST operators
]
_is_implicitly_boolean = True
"""
- def _gen_cache_key(self, anon_map, bindparams):
- # inlined for performance
-
- idself = id(self)
-
- if idself in anon_map:
- return (anon_map[idself], self.__class__)
- else:
- # inline of
- # id_ = anon_map[idself]
- anon_map[idself] = id_ = str(anon_map.index)
- anon_map.index += 1
-
- if self._cache_key_traversal is NO_CACHE:
- anon_map[NO_CACHE] = True
- return None
-
- result = (id_, self.__class__)
-
- return result + (
- ("left", self.left._gen_cache_key(anon_map, bindparams)),
- ("right", self.right._gen_cache_key(anon_map, bindparams)),
- ("operator", self.operator),
- ("negate", self.negate),
- (
- "modifiers",
- tuple(
- (key, self.modifiers[key])
- for key in sorted(self.modifiers)
- )
- if self.modifiers
- else None,
- ),
- )
-
def __init__(
self, left, right, operator, type_=None, negate=None, modifiers=None
):
__visit_name__ = "slice"
_traverse_internals = [
- ("start", InternalTraversal.dp_plain_obj),
- ("stop", InternalTraversal.dp_plain_obj),
- ("step", InternalTraversal.dp_plain_obj),
+ ("start", InternalTraversal.dp_clauseelement),
+ ("stop", InternalTraversal.dp_clauseelement),
+ ("step", InternalTraversal.dp_clauseelement),
]
- def __init__(self, start, stop, step):
- self.start = start
- self.stop = stop
- self.step = step
+ def __init__(self, start, stop, step, _name=None):
+ self.start = coercions.expect(
+ roles.ExpressionElementRole,
+ start,
+ name=_name,
+ type_=type_api.INTEGERTYPE,
+ )
+ self.stop = coercions.expect(
+ roles.ExpressionElementRole,
+ stop,
+ name=_name,
+ type_=type_api.INTEGERTYPE,
+ )
+ self.step = coercions.expect(
+ roles.ExpressionElementRole,
+ step,
+ name=_name,
+ type_=type_api.INTEGERTYPE,
+ )
self.type = type_api.NULLTYPE
def self_group(self, against=None):
coerce_arguments = True
_register = False
+ inherit_cache = True
def __init__(self, *args, **kwargs):
parsed_args = kwargs.pop("_parsed_args", None)
class AnsiFunction(GenericFunction):
+ inherit_cache = True
+
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, *args, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
+ inherit_cache = True
+
def __init__(self, *args, **kwargs):
args = [
coercions.expect(
class coalesce(ReturnTypeFromArgs):
_has_args = True
+ inherit_cache = True
class max(ReturnTypeFromArgs): # noqa
- pass
+ inherit_cache = True
class min(ReturnTypeFromArgs): # noqa
- pass
+ inherit_cache = True
class sum(ReturnTypeFromArgs): # noqa
- pass
+ inherit_cache = True
class now(GenericFunction): # noqa
type = sqltypes.DateTime
+ inherit_cache = True
class concat(GenericFunction):
type = sqltypes.String
+ inherit_cache = True
class char_length(GenericFunction):
type = sqltypes.Integer
+ inherit_cache = True
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, arg, **kwargs)
class random(GenericFunction):
_has_args = True
+ inherit_cache = True
class count(GenericFunction):
"""
type = sqltypes.Integer
+ inherit_cache = True
def __init__(self, expression=None, **kwargs):
if expression is None:
class current_date(AnsiFunction):
type = sqltypes.Date
+ inherit_cache = True
class current_time(AnsiFunction):
type = sqltypes.Time
+ inherit_cache = True
class current_timestamp(AnsiFunction):
type = sqltypes.DateTime
+ inherit_cache = True
class current_user(AnsiFunction):
type = sqltypes.String
+ inherit_cache = True
class localtime(AnsiFunction):
type = sqltypes.DateTime
+ inherit_cache = True
class localtimestamp(AnsiFunction):
type = sqltypes.DateTime
+ inherit_cache = True
class session_user(AnsiFunction):
type = sqltypes.String
+ inherit_cache = True
class sysdate(AnsiFunction):
type = sqltypes.DateTime
+ inherit_cache = True
class user(AnsiFunction):
type = sqltypes.String
+ inherit_cache = True
class array_agg(GenericFunction):
"""
type = sqltypes.ARRAY
+ inherit_cache = True
def __init__(self, *args, **kwargs):
args = [
:meth:`.FunctionElement.within_group` method."""
array_for_multi_clause = False
+ inherit_cache = True
def within_group_type(self, within_group):
func_clauses = self.clause_expr.element
"""
+ inherit_cache = True
+
class percentile_cont(OrderedSetAgg):
"""implement the ``percentile_cont`` ordered-set aggregate function.
"""
array_for_multi_clause = True
+ inherit_cache = True
class percentile_disc(OrderedSetAgg):
"""
array_for_multi_clause = True
+ inherit_cache = True
class rank(GenericFunction):
"""
type = sqltypes.Integer()
+ inherit_cache = True
class dense_rank(GenericFunction):
"""
type = sqltypes.Integer()
+ inherit_cache = True
class percent_rank(GenericFunction):
"""
type = sqltypes.Numeric()
+ inherit_cache = True
class cume_dist(GenericFunction):
"""
type = sqltypes.Numeric()
+ inherit_cache = True
class cube(GenericFunction):
"""
_has_args = True
+ inherit_cache = True
class rollup(GenericFunction):
"""
_has_args = True
+ inherit_cache = True
class grouping_sets(GenericFunction):
"""
_has_args = True
+ inherit_cache = True
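# Illustrative sketch, not part of this patch: user-defined GenericFunction
# subclasses opt into the same caching mechanism by declaring
# inherit_cache = True, just as the built-in functions above do; without
# the flag, _generate_cache_attrs() finds no traversal in the subclass
# __dict__ and falls back to NO_CACHE for that class.
from sqlalchemy import String
from sqlalchemy.sql.functions import GenericFunction

class group_concat(GenericFunction):
    """hypothetical user-defined SQL function, for illustration only"""

    type = String()
    inherit_cache = True  # reuse the parent class traversal for cache keys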
__visit_name__ = "column"
+ inherit_cache = True
+
def __init__(self, *args, **kwargs):
r"""
Construct a new ``Column`` object.
class _OffsetLimitParam(BindParameter):
+ inherit_cache = True
+
@property
def _limit_offset_value(self):
return self.effective_value
__visit_name__ = "alias"
+ inherit_cache = True
+
@classmethod
def _factory(cls, selectable, name=None, flat=False):
"""Return an :class:`_expression.Alias` object.
__visit_name__ = "lateral"
_is_lateral = True
+ inherit_cache = True
+
@classmethod
def _factory(cls, selectable, name=None):
"""Return a :class:`_expression.Lateral` object.
AliasedReturnsRows._traverse_internals
+ [
("_cte_alias", InternalTraversal.dp_clauseelement),
- ("_restates", InternalTraversal.dp_clauseelement_unordered_set),
+ ("_restates", InternalTraversal.dp_clauseelement_list),
("recursive", InternalTraversal.dp_boolean),
]
+ HasPrefixes._has_prefixes_traverse_internals
name=None,
recursive=False,
_cte_alias=None,
- _restates=frozenset(),
+ _restates=(),
_prefixes=None,
_suffixes=None,
):
self.element.union(other),
name=self.name,
recursive=self.recursive,
- _restates=self._restates.union([self]),
+ _restates=self._restates + (self,),
_prefixes=self._prefixes,
_suffixes=self._suffixes,
)
self.element.union_all(other),
name=self.name,
recursive=self.recursive,
- _restates=self._restates.union([self]),
+ _restates=self._restates + (self,),
_prefixes=self._prefixes,
_suffixes=self._suffixes,
)
_is_subquery = True
+ inherit_cache = True
+
@classmethod
def _factory(cls, selectable, name=None):
"""Return a :class:`.Subquery` object.
("_group_by_clauses", InternalTraversal.dp_clauseelement_list,),
("_setup_joins", InternalTraversal.dp_setup_join_tuple,),
("_legacy_setup_joins", InternalTraversal.dp_setup_join_tuple,),
- ("_correlate", InternalTraversal.dp_clauseelement_unordered_set),
- (
- "_correlate_except",
- InternalTraversal.dp_clauseelement_unordered_set,
- ),
+ ("_correlate", InternalTraversal.dp_clauseelement_list),
+ ("_correlate_except", InternalTraversal.dp_clauseelement_list,),
+ ("_limit_clause", InternalTraversal.dp_clauseelement),
+ ("_offset_clause", InternalTraversal.dp_clauseelement),
("_for_update_arg", InternalTraversal.dp_clauseelement),
("_distinct", InternalTraversal.dp_boolean),
("_distinct_on", InternalTraversal.dp_clauseelement_list),
("_label_style", InternalTraversal.dp_plain_obj),
+ ("_is_future", InternalTraversal.dp_boolean),
]
+ HasPrefixes._has_prefixes_traverse_internals
+ HasSuffixes._has_suffixes_traverse_internals
if fromclauses and fromclauses[0] is None:
self._correlate = ()
else:
- self._correlate = set(self._correlate).union(
+ self._correlate = self._correlate + tuple(
coercions.expect(roles.FromClauseRole, f) for f in fromclauses
)
if fromclauses and fromclauses[0] is None:
self._correlate_except = ()
else:
- self._correlate_except = set(self._correlate_except or ()).union(
+ self._correlate_except = (self._correlate_except or ()) + tuple(
coercions.expect(roles.FromClauseRole, f) for f in fromclauses
)
_from_objects = []
_is_from_container = True
_is_implicitly_boolean = False
+ inherit_cache = True
def __init__(self, element):
self.element = element
"""
_from_objects = []
+ inherit_cache = True
def __init__(self, *args, **kwargs):
"""Construct a new :class:`_expression.Exists` against an existing
return_type = self.type
if self.type.zero_indexes:
index = slice(index.start + 1, index.stop + 1, index.step)
- index = Slice(
- coercions.expect(
- roles.ExpressionElementRole,
- index.start,
- name=self.expr.key,
- type_=type_api.INTEGERTYPE,
- ),
- coercions.expect(
- roles.ExpressionElementRole,
- index.stop,
- name=self.expr.key,
- type_=type_api.INTEGERTYPE,
- ),
- coercions.expect(
- roles.ExpressionElementRole,
- index.step,
- name=self.expr.key,
- type_=type_api.INTEGERTYPE,
- ),
+ slice_ = Slice(
+ index.start, index.stop, index.step, _name=self.expr.key
)
+ return operators.getitem, slice_, return_type
else:
if self.type.zero_indexes:
index += 1
self.type.__class__, **adapt_kw
)
- return operators.getitem, index, return_type
+ return operators.getitem, index, return_type
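# Illustrative sketch, not part of this patch: the coercion that previously
# happened in _setup_getitem (removed above) now lives in Slice itself, so
# an ARRAY slice still renders its bounds as bound parameters named after
# the column (the _name passed through).  Assumes the PostgreSQL ARRAY type
# as the concrete indexable type.
from sqlalchemy import String
from sqlalchemy.sql import column
from sqlalchemy.dialects import postgresql

data = column("data", postgresql.ARRAY(String))
print(data[2:5].compile(dialect=postgresql.dialect()))
# typically renders: data[%(data_1)s:%(data_2)s]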
def contains(self, *arg, **kw):
raise NotImplementedError(
CACHE_IN_PLACE = util.symbol("cache_in_place")
CALL_GEN_CACHE_KEY = util.symbol("call_gen_cache_key")
STATIC_CACHE_KEY = util.symbol("static_cache_key")
+PROPAGATE_ATTRS = util.symbol("propagate_attrs")
ANON_NAME = util.symbol("anon_name")
return strategy.compare(obj1, obj2, **kw)
+def _preconfigure_traversals(target_hierarchy):
+
+ stack = [target_hierarchy]
+ while stack:
+ cls = stack.pop()
+ stack.extend(cls.__subclasses__())
+
+ if hasattr(cls, "_traverse_internals"):
+ cls._generate_cache_attrs()
+ _copy_internals.generate_dispatch(
+ cls,
+ cls._traverse_internals,
+ "_generated_copy_internals_traversal",
+ )
+ _get_children.generate_dispatch(
+ cls,
+ cls._traverse_internals,
+ "_generated_get_children_traversal",
+ )
+
+
class HasCacheKey(object):
_cache_key_traversal = NO_CACHE
__slots__ = ()
+ @classmethod
+ def _generate_cache_attrs(cls):
+ """generate cache key dispatcher for a new class.
+
+ This sets the _generated_cache_key_traversal attribute once called,
+ so it should only be called once per class.
+
+ """
+ inherit = cls.__dict__.get("inherit_cache", False)
+
+ if inherit:
+ _cache_key_traversal = getattr(cls, "_cache_key_traversal", None)
+ if _cache_key_traversal is None:
+ try:
+ _cache_key_traversal = cls._traverse_internals
+ except AttributeError:
+ cls._generated_cache_key_traversal = NO_CACHE
+ return NO_CACHE
+
+ # TODO: wouldn't we instead get this from our superclass?
+ # also, our superclass may not have this yet, but in any case,
+ # we'd generate for the superclass that has it. this is a little
+ # more complicated, so for the moment this is a little less
+ # efficient on startup but simpler.
+ return _cache_key_traversal_visitor.generate_dispatch(
+ cls, _cache_key_traversal, "_generated_cache_key_traversal"
+ )
+ else:
+ _cache_key_traversal = cls.__dict__.get(
+ "_cache_key_traversal", None
+ )
+ if _cache_key_traversal is None:
+ _cache_key_traversal = cls.__dict__.get(
+ "_traverse_internals", None
+ )
+ if _cache_key_traversal is None:
+ cls._generated_cache_key_traversal = NO_CACHE
+ return NO_CACHE
+
+ return _cache_key_traversal_visitor.generate_dispatch(
+ cls, _cache_key_traversal, "_generated_cache_key_traversal"
+ )
+
@util.preload_module("sqlalchemy.sql.elements")
def _gen_cache_key(self, anon_map, bindparams):
"""return an optional cache key.
else:
id_ = None
- _cache_key_traversal = self._cache_key_traversal
- if _cache_key_traversal is None:
- try:
- _cache_key_traversal = self._traverse_internals
- except AttributeError:
- _cache_key_traversal = NO_CACHE
+ try:
+ dispatcher = self.__class__.__dict__[
+ "_generated_cache_key_traversal"
+ ]
+ except KeyError:
+ # most of the dispatchers are generated up front
+ # in sqlalchemy/sql/__init__.py ->
+ # traversals.py-> _preconfigure_traversals().
+ # this block will generate any remaining dispatchers.
+ dispatcher = self.__class__._generate_cache_attrs()
- if _cache_key_traversal is NO_CACHE:
+ if dispatcher is NO_CACHE:
if anon_map is not None:
anon_map[NO_CACHE] = True
return None
result = (id_, self.__class__)
# inline of _cache_key_traversal_visitor.run_generated_dispatch()
- try:
- dispatcher = self.__class__.__dict__[
- "_generated_cache_key_traversal"
- ]
- except KeyError:
- dispatcher = _cache_key_traversal_visitor.generate_dispatch(
- self, _cache_key_traversal, "_generated_cache_key_traversal"
- )
for attrname, obj, meth in dispatcher(
self, _cache_key_traversal_visitor
):
if obj is not None:
+ # TODO: see if C code can help here as Python lacks an
+ # efficient switch construct
if meth is CACHE_IN_PLACE:
# cache in place is always going to be a Python
# tuple, dict, list, etc. so we can do a boolean check
attrname,
obj._gen_cache_key(anon_map, bindparams),
)
+ elif meth is PROPAGATE_ATTRS:
+ if obj:
+ result += (
+ attrname,
+ obj["compile_state_plugin"],
+ obj["plugin_subject"]._gen_cache_key(
+ anon_map, bindparams
+ ),
+ )
elif meth is InternalTraversal.dp_annotations_key:
# obj is here is the _annotations dict. however,
# we want to use the memoized cache key version of it.
visit_type = STATIC_CACHE_KEY
visit_anon_name = ANON_NAME
+ visit_propagate_attrs = PROPAGATE_ATTRS
+
def visit_inspectable(self, attrname, obj, parent, anon_map, bindparams):
return (attrname, inspect(obj)._gen_cache_key(anon_map, bindparams))
def visit_setup_join_tuple(
self, attrname, obj, parent, anon_map, bindparams
):
+ is_legacy = "legacy" in attrname
+
return tuple(
(
- target._gen_cache_key(anon_map, bindparams),
- onclause._gen_cache_key(anon_map, bindparams)
+ target
+ if is_legacy and isinstance(target, str)
+ else target._gen_cache_key(anon_map, bindparams),
+ onclause
+ if is_legacy and isinstance(onclause, str)
+ else onclause._gen_cache_key(anon_map, bindparams)
if onclause is not None
else None,
from_._gen_cache_key(anon_map, bindparams)
for sequence in element
]
+ def visit_propagate_attrs(
+ self, attrname, parent, element, clone=_clone, **kw
+ ):
+ return element
+
_copy_internals = _CopyInternals()
def visit_dml_multi_values(self, element, **kw):
return ()
+ def visit_propagate_attrs(self, element, **kw):
+ return ()
+
_get_children = _GetChildren()
):
return COMPARE_FAILED
+ def visit_propagate_attrs(
+ self, attrname, left_parent, left, right_parent, right, **kw
+ ):
+ return self.compare_inner(
+ left.get("plugin_subject", None), right.get("plugin_subject", None)
+ )
+
def visit_has_cache_key_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
def _static_cache_key(self):
names = util.get_cls_kwargs(self.__class__)
return (self.__class__,) + tuple(
- (k, self.__dict__[k])
+ (
+ k,
+ self.__dict__[k]._static_cache_key
+ if isinstance(self.__dict__[k], TypeEngine)
+ else self.__dict__[k],
+ )
for k in names
if k in self.__dict__ and not k.startswith("_")
)
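# Illustrative sketch, not part of this patch: with the change above, a
# nested TypeEngine such as the item_type of an ARRAY contributes its own
# _static_cache_key, so structurally equal types compare equal while
# differing parameters do not.
from sqlalchemy import ARRAY, String

k1 = ARRAY(String(50))._static_cache_key
k2 = ARRAY(String(50))._static_cache_key
k3 = ARRAY(String(60))._static_cache_key

assert k1 == k2  # same nested parameters -> same static key
assert k1 != k3  # differing inner String length changes the key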
try:
dispatcher = target.__class__.__dict__[generate_dispatcher_name]
except KeyError:
+ # most of the dispatchers are generated up front
+ # in sqlalchemy/sql/__init__.py ->
+ # traversals.py-> _preconfigure_traversals().
+ # this block will generate any remaining dispatchers.
dispatcher = self.generate_dispatch(
- target, internal_dispatch, generate_dispatcher_name
+ target.__class__, internal_dispatch, generate_dispatcher_name
)
return dispatcher(target, self)
def generate_dispatch(
- self, target, internal_dispatch, generate_dispatcher_name
+ self, target_cls, internal_dispatch, generate_dispatcher_name
):
dispatcher = _generate_dispatcher(
self, internal_dispatch, generate_dispatcher_name
)
- setattr(target.__class__, generate_dispatcher_name, dispatcher)
+ # assert isinstance(target_cls, type)
+ setattr(target_cls, generate_dispatcher_name, dispatcher)
return dispatcher
dp_has_cache_key = symbol("HC")
"""
- dp_clauseelement_unordered_set = symbol("CU")
- """Visit an unordered set of :class:`_expression.ClauseElement`
- objects. """
-
dp_fromclause_ordered_set = symbol("CO")
"""Visit an ordered set of :class:`_expression.FromClause` objects. """
"""
+ dp_propagate_attrs = symbol("PA")
+ """Visit the propagate attrs dict. this hardcodes to the particular
+ elements we care about right now."""
+
class ExtendedInternalTraversal(InternalTraversal):
"""defines additional symbols that are useful in caching applications.
else:
map_ = None
- if isinstance(context.compiled.statement, _DDLCompiles):
+ if isinstance(execute_observed.clauseelement, _DDLCompiles):
- compiled = context.compiled.statement.compile(
+ compiled = execute_observed.clauseelement.compile(
dialect=compare_dialect, schema_translate_map=map_
)
else:
- compiled = context.compiled.statement.compile(
+ compiled = execute_observed.clauseelement.compile(
dialect=compare_dialect,
column_keys=context.compiled.column_keys,
inline=context.compiled.inline,
)
if assert_no_sessions:
- assert len(_sessions) == 0, "sessions remain"
+ assert len(_sessions) == 0, "%d sessions remain" % (
+ len(_sessions),
+ )
# queue.put(('samples', samples))
else:
queue.put(("result", True, "success"))
+ def run_plain(*func_args):
+ import queue as _queue
+
+ q = _queue.Queue()
+ profile(q, func_args)
+
+ while True:
+ row = q.get()
+ typ = row[0]
+ if typ == "samples":
+ print("sample gc sizes:", row[1])
+ elif typ == "status":
+ print(row[1])
+ elif typ == "result":
+ break
+ else:
+ assert False, "can't parse row"
+ assert row[1], row[2]
+
+ # return run_plain
+
def run_in_process(*func_args):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
def setup(self):
_sessions.clear()
_mapper_registry.clear()
- self.engine = engines.testing_engine(options={"use_reaper": False})
+
+ # enable query caching, but make the cache small so that
+ # the tests don't take too long. issues w/ caching include making
+ # sure sessions don't get stuck inside of it. However, it will
+ # make tests like test_mapper_reset take a long time because mappers
+ # are very much a part of what's in the cache.
+ self.engine = engines.testing_engine(
+ options={"use_reaper": False, "query_cache_size": 10}
+ )
class MemUsageTest(EnsureZeroed):
sess = Session()
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
+ del sess
try:
go()
sess.delete(a)
sess.flush()
- # don't need to clear_mappers()
+ # mappers necessarily find themselves in the compiled cache,
+ # so to allow them to be GC'ed, clear out the cache
+ self.engine.clear_compiled_cache()
del B
del A
go()
+ def test_proxied_attribute(self):
+ from sqlalchemy.ext import hybrid
+
+ users = self.tables.users
+
+ class Foo(object):
+ @hybrid.hybrid_property
+ def user_name(self):
+ return self.name
+
+ mapper(Foo, users)
+
+ # unfortunately there are a lot of cycles with an aliased()
+ # for now; however, calling upon __clause_element__ does not seem
+ # to make it worse, which is what this test is checking
+ @assert_cycles(68)
+ def go():
+ a1 = aliased(Foo)
+ a1.user_name.__clause_element__()
+
+ go()
+
def test_raise_from(self):
@assert_cycles()
def go():
current_key = None
for stmt in stmt_fixture_one:
key = stmt._generate_cache_key()
+ assert key is not None
if current_key:
eq_(key, current_key)
else:
current_key = None
for stmt in stmt_fixture_one:
key = stmt._generate_cache_key()
+ assert key is not None
if current_key:
eq_(key, current_key)
else:
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
+from sqlalchemy.testing import config
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import profiling
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
-class MergeTest(fixtures.MappedTest):
+class NoCache(object):
+ run_setup_bind = "each"
+
+ @classmethod
+ def setup_class(cls):
+ super(NoCache, cls).setup_class()
+ cls._cache = config.db._compiled_cache
+ config.db._compiled_cache = None
+
+ @classmethod
+ def teardown_class(cls):
+ config.db._compiled_cache = cls._cache
+ super(NoCache, cls).teardown_class()
+
+
+class MergeTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
self.assert_sql_count(testing.db, go2, 2)
-class LoadManyToOneFromIdentityTest(fixtures.MappedTest):
+class LoadManyToOneFromIdentityTest(NoCache, fixtures.MappedTest):
"""test overhead associated with many-to-one fetches.
go()
-class MergeBackrefsTest(fixtures.MappedTest):
+class MergeBackrefsTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
s.merge(a)
-class DeferOptionsTest(fixtures.MappedTest):
+class DeferOptionsTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
).all()
-class AttributeOverheadTest(fixtures.MappedTest):
+class AttributeOverheadTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
go()
-class SessionTest(fixtures.MappedTest):
+class SessionTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
go()
-class QueryTest(fixtures.MappedTest):
+class QueryTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
go()
-class SelectInEagerLoadTest(fixtures.MappedTest):
+class SelectInEagerLoadTest(NoCache, fixtures.MappedTest):
"""basic test for selectin() loading, which uses a baked query.
if the baked query starts spoiling due to some bug in cache keys,
go()
-class JoinedEagerLoadTest(fixtures.MappedTest):
+class JoinedEagerLoadTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
go()
-class JoinConditionTest(fixtures.DeclarativeMappedTest):
+class JoinConditionTest(NoCache, fixtures.DeclarativeMappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
go()
-class BranchedOptionTest(fixtures.MappedTest):
+class BranchedOptionTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
go()
-class AnnotatedOverheadTest(fixtures.MappedTest):
+class AnnotatedOverheadTest(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
@classmethod
@profiling.function_call_count()
def test_string(self):
- [tuple(row) for row in t.select().execute().fetchall()]
+ with testing.db.connect().execution_options(
+ compiled_cache=None
+ ) as conn:
+ [tuple(row) for row in conn.execute(t.select()).fetchall()]
@profiling.function_call_count()
def test_unicode(self):
- [tuple(row) for row in t2.select().execute().fetchall()]
+ with testing.db.connect().execution_options(
+ compiled_cache=None
+ ) as conn:
+ [tuple(row) for row in conn.execute(t2.select()).fetchall()]
@profiling.function_call_count(variance=0.10)
def test_raw_string(self):
@profiling.function_call_count()
def test_fetch_by_key_legacy(self):
- with testing.db.connect() as conn:
+ with testing.db.connect().execution_options(
+ compiled_cache=None
+ ) as conn:
for row in conn.execute(t.select()).fetchall():
[row["field%d" % fnum] for fnum in range(NUM_FIELDS)]
@profiling.function_call_count()
def test_fetch_by_key_mappings(self):
- with testing.db.connect() as conn:
+ with testing.db.connect().execution_options(
+ compiled_cache=None
+ ) as conn:
for row in conn.execute(t.select()).mappings().fetchall():
[row["field%d" % fnum] for fnum in range(NUM_FIELDS)]
def test_one_or_none(self, one_or_first, rows_present):
# TODO: this is not testing the ORM level "scalar_mapping"
# mode which has a different performance profile
- with testing.db.connect() as conn:
+ with testing.db.connect().execution_options(
+ compiled_cache=None
+ ) as conn:
stmt = t.select()
if rows_present == 0:
stmt = stmt.where(1 == 0)
import datetime
from sqlalchemy import and_
-from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import DateTime
self.metadata.drop_all()
self.metadata.clear()
- def test_compiled_insert(self):
- table = Table(
- "testtable",
- self.metadata,
- Column("id", Integer, primary_key=True),
- Column("data", String(30)),
- )
- self.metadata.create_all()
- ins = table.insert(
- inline=True, values={"data": bindparam("x")}
- ).compile()
- ins.execute({"x": "five"}, {"x": "seven"})
- eq_(table.select().execute().fetchall(), [(1, "five"), (2, "seven")])
-
def test_foreignkey_missing_insert(self):
Table("t1", self.metadata, Column("id", Integer, primary_key=True))
t2 = Table(
def _assert_data_noautoincrement(self, table):
engine = engines.testing_engine(options={"implicit_returning": False})
- with engine.connect() as conn:
+ # turning off the cache because we are checking for compile-time
+ # warnings
+ with engine.connect().execution_options(compiled_cache=None) as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
with expect_warnings(
ins = users.insert()
with patch.object(
- ins, "compile", Mock(side_effect=ins.compile)
+ ins, "_compiler", Mock(side_effect=ins._compiler)
) as compile_mock:
cached_conn.execute(ins, {"user_name": "u1"})
cached_conn.execute(ins, {"user_name": "u2"})
ins = photo.insert()
with patch.object(
- ins, "compile", Mock(side_effect=ins.compile)
+ ins, "_compiler", Mock(side_effect=ins._compiler)
) as compile_mock:
cached_conn.execute(ins, {"photo_blob": blob})
eq_(compile_mock.call_count, 1)
upd = users.update().where(users.c.user_id == bindparam("b_user_id"))
with patch.object(
- upd, "compile", Mock(side_effect=upd.compile)
+ upd, "_compiler", Mock(side_effect=upd._compiler)
) as compile_mock:
cached_conn.execute(
upd,
)
eq_(
self.buf.buffer[1].message,
- "[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, "
+ "[raw sql] [{'data': '0'}, {'data': '1'}, {'data': '2'}, "
+ "{'data': '3'}, "
"{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
" ... displaying 10 of 100 total bound "
"parameter sets ... {'data': '98'}, {'data': '99'}]",
)
eq_(
self.buf.buffer[1].message,
- "[SQL parameters hidden due to hide_parameters=True]",
+ "[raw sql] [SQL parameters hidden due to hide_parameters=True]",
)
def test_log_large_list_of_tuple(self):
)
eq_(
self.buf.buffer[1].message,
- "[('0',), ('1',), ('2',), ('3',), ('4',), ('5',), "
+ "[raw sql] [('0',), ('1',), ('2',), ('3',), ('4',), ('5',), "
"('6',), ('7',) ... displaying 10 of 100 total "
"bound parameter sets ... ('98',), ('99',)]",
)
"[parameters: ([1, 2, 3], 'hi')]\n" in str(exc_info)
)
- eq_(self.buf.buffer[1].message, "([1, 2, 3], 'hi')")
+ eq_regex(
+ self.buf.buffer[1].message,
+ r"\[generated .*\] \(\[1, 2, 3\], 'hi'\)",
+ )
def test_repr_params_positional_array(self):
eq_(
eq_(
self.buf.buffer[1].message,
- "('%s ... (4702 characters truncated) ... %s',)"
+ "[raw sql] ('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
eq_(
self.buf.buffer[1].message,
- "('%s', '%s', '%s ... (372 characters truncated) ... %s')"
- % (lp1, lp2, lp3[0:149], lp3[-149:]),
+ "[raw sql] ('%s', '%s', '%s ... (372 characters truncated) "
+ "... %s')" % (lp1, lp2, lp3[0:149], lp3[-149:]),
)
def test_log_large_parameter_multiple(self):
eq_(
self.buf.buffer[1].message,
- "[('%s ... (4702 characters truncated) ... %s',), ('%s',), "
+ "[raw sql] [('%s ... (4702 characters truncated) ... %s',), "
+ "('%s',), "
"('%s ... (372 characters truncated) ... %s',)]"
% (lp1[0:149], lp1[-149:], lp2, lp3[0:149], lp3[-149:]),
)
eq_(
self.buf.buffer[1].message,
- "('%s ... (4702 characters truncated) ... %s',)"
+ "[raw sql] ('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import join
from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import selectinload
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.testing import assert_raises
count = {"": 14, "Polymorphic": 9}.get(self.select_type, 10)
self.assert_sql_count(testing.db, go, count)
- def test_primary_eager_aliasing_one(self):
+ def test_primary_eager_aliasing_joinedload(self):
# For both joinedload() and subqueryload(), if the original q is
# not loading the subclass table, the joinedload doesn't happen.
count = {"": 6, "Polymorphic": 3}.get(self.select_type, 4)
self.assert_sql_count(testing.db, go, count)
- def test_primary_eager_aliasing_two(self):
+ def test_primary_eager_aliasing_subqueryload(self):
+ # test that subqueryload does not occur because the parent
+ # row cannot support it
sess = create_session()
def go():
count = {"": 14, "Polymorphic": 7}.get(self.select_type, 8)
self.assert_sql_count(testing.db, go, count)
+ def test_primary_eager_aliasing_selectinload(self):
+ # test that selectinload does not occur because the parent
+ # row cannot support it
+ sess = create_session()
+
+ def go():
+ eq_(
+ sess.query(Person)
+ .order_by(Person.person_id)
+ .options(selectinload(Engineer.machines))
+ .all(),
+ all_employees,
+ )
+
+ count = {"": 14, "Polymorphic": 7}.get(self.select_type, 8)
+ self.assert_sql_count(testing.db, go, count)
+
def test_primary_eager_aliasing_three(self):
# assert the JOINs don't over JOIN
from sqlalchemy import true
from sqlalchemy.orm import aliased
from sqlalchemy.orm import Bundle
-from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import create_session
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
+from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
)
@classmethod
- def setup_classes(cls):
- class Employee(cls.Comparable):
- pass
-
- class Manager(Employee):
- pass
-
- class Stuff(cls.Comparable):
- pass
-
- def test_subquery_load(self):
+ def setup_mappers(cls):
employee, employee_stuff, Employee, Stuff, Manager = (
- self.tables.employee,
- self.tables.employee_stuff,
- self.classes.Employee,
- self.classes.Stuff,
- self.classes.Manager,
+ cls.tables.employee,
+ cls.tables.employee_stuff,
+ cls.classes.Employee,
+ cls.classes.Stuff,
+ cls.classes.Manager,
)
mapper(
)
mapper(Stuff, employee_stuff)
- sess = create_session()
- context = (
- sess.query(Manager)
- .options(subqueryload("stuff"))
- ._compile_context()
+ @classmethod
+ def setup_classes(cls):
+ class Employee(cls.Comparable):
+ pass
+
+ class Manager(Employee):
+ pass
+
+ class Stuff(cls.Comparable):
+ pass
+
+ @classmethod
+ def insert_data(cls, connection):
+ Employee, Stuff, Manager = cls.classes("Employee", "Stuff", "Manager")
+ s = Session(connection)
+
+ s.add_all(
+ [
+ Employee(
+ name="e1", stuff=[Stuff(name="es1"), Stuff(name="es2")]
+ ),
+ Manager(
+ name="m1", stuff=[Stuff(name="ms1"), Stuff(name="ms2")]
+ ),
+ ]
)
- subq = context.attributes[
- (
- "subqueryload_data",
- (class_mapper(Manager), class_mapper(Manager).attrs.stuff),
- )
- ]["query"]
+ s.commit()
- self.assert_compile(
- subq,
- "SELECT employee_stuff.id AS "
- "employee_stuff_id, employee_stuff.employee"
- "_id AS employee_stuff_employee_id, "
- "employee_stuff.name AS "
- "employee_stuff_name, anon_1.employee_id "
- "AS anon_1_employee_id FROM (SELECT "
- "employee.id AS employee_id FROM employee "
- "WHERE employee.type IN ([POSTCOMPILE_type_1])) AS anon_1 "
- "JOIN employee_stuff ON anon_1.employee_id "
- "= employee_stuff.employee_id",
- use_default_dialect=True,
+ def test_subquery_load(self):
+ Employee, Stuff, Manager = self.classes("Employee", "Stuff", "Manager")
+
+ sess = create_session()
+
+ with self.sql_execution_asserter(testing.db) as asserter:
+ sess.query(Manager).options(subqueryload("stuff")).all()
+
+ asserter.assert_(
+ CompiledSQL(
+ "SELECT employee.id AS employee_id, employee.name AS "
+ "employee_name, employee.type AS employee_type "
+ "FROM employee WHERE employee.type IN ([POSTCOMPILE_type_1])",
+ params=[{"type_1": ["manager"]}],
+ ),
+ CompiledSQL(
+ "SELECT employee_stuff.id AS "
+ "employee_stuff_id, employee_stuff.employee"
+ "_id AS employee_stuff_employee_id, "
+ "employee_stuff.name AS "
+ "employee_stuff_name, anon_1.employee_id "
+ "AS anon_1_employee_id FROM (SELECT "
+ "employee.id AS employee_id FROM employee "
+ "WHERE employee.type IN ([POSTCOMPILE_type_1])) AS anon_1 "
+ "JOIN employee_stuff ON anon_1.employee_id "
+ "= employee_stuff.employee_id",
+ params=[{"type_1": ["manager"]}],
+ ),
)
from sqlalchemy import inspect
+from sqlalchemy import text
from sqlalchemy.future import select as future_select
from sqlalchemy.orm import aliased
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import defer
+from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
+from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import with_polymorphic
defer("id"),
defer("*"),
defer(Address.id),
+ subqueryload(User.orders),
+ selectinload(User.orders),
joinedload(User.addresses).defer(Address.id),
joinedload(aliased(User).addresses).defer(Address.id),
joinedload(User.addresses).defer("id"),
future_select(User)
.join(Address, User.addresses)
.join_from(User, User.orders),
+ future_select(User.id, Order.id).select_from(
+ orm_join(User, Order, User.orders)
+ ),
+ ),
+ compare_values=True,
+ )
+
+ def test_orm_query_w_orm_joins(self):
+
+ User, Address, Keyword, Order, Item = self.classes(
+ "User", "Address", "Keyword", "Order", "Item"
+ )
+
+ a1 = aliased(Address)
+
+ self._run_cache_key_fixture(
+ lambda: stmt_20(
+ Session().query(User).join(User.addresses),
+ Session().query(User).join(User.orders),
+ Session().query(User).join(User.addresses).join(User.orders),
+ Session()
+ .query(User)
+ .join("addresses")
+ .join("dingalings", from_joinpoint=True),
+ Session().query(User).join("addresses"),
+ Session().query(User).join("orders"),
+ Session().query(User).join("addresses").join("orders"),
+ Session().query(User).join(Address, User.addresses),
+ Session().query(User).join(a1, "addresses"),
+ Session().query(User).join(a1, "addresses", aliased=True),
+ Session().query(User).join(User.addresses.of_type(a1)),
+ ),
+ compare_values=True,
+ )
+
+ def test_orm_query_from_statement(self):
+ User, Address, Keyword, Order, Item = self.classes(
+ "User", "Address", "Keyword", "Order", "Item"
+ )
+
+ self._run_cache_key_fixture(
+ lambda: stmt_20(
+ Session()
+ .query(User)
+ .from_statement(text("select * from user")),
+ Session()
+ .query(User)
+ .options(selectinload(User.addresses))
+ .from_statement(text("select * from user")),
+ Session()
+ .query(User)
+ .options(subqueryload(User.addresses))
+ .from_statement(text("select * from user")),
+ Session()
+ .query(User)
+ .from_statement(text("select * from user order by id")),
+ Session()
+ .query(User.id)
+ .from_statement(text("select * from user")),
),
compare_values=True,
)
import sqlalchemy as sa
from sqlalchemy import bindparam
from sqlalchemy import ForeignKey
-from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import aliased
+from sqlalchemy.orm import backref
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import close_all_sessions
from sqlalchemy.orm import create_session
self.assert_sql_count(testing.db, go, 2)
+ def test_params_arent_cached(self):
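+ # two queries that differ only in their parameter values are run with
+ # a shared cache dict; each must still load its own subquery-eager
+ # collection rather than reusing the other query's parameters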
+ users, Address, addresses, User = (
+ self.tables.users,
+ self.classes.Address,
+ self.tables.addresses,
+ self.classes.User,
+ )
+
+ mapper(
+ User,
+ users,
+ properties={
+ "addresses": relationship(
+ mapper(Address, addresses),
+ lazy="subquery",
+ order_by=Address.id,
+ )
+ },
+ )
+ query_cache = {}
+ sess = create_session()
+
+ u1 = (
+ sess.query(User)
+ .execution_options(query_cache=query_cache)
+ .filter(User.id == 7)
+ .one()
+ )
+
+ u2 = (
+ sess.query(User)
+ .execution_options(query_cache=query_cache)
+ .filter(User.id == 8)
+ .one()
+ )
+ eq_(len(u1.addresses), 1)
+ eq_(len(u2.addresses), 3)
+
def test_from_aliased(self):
users, Dingaling, User, dingalings, Address, addresses = (
self.tables.users,
movies = relationship("Movie", foreign_keys=Movie.director_id)
name = Column(String(50))
+ @classmethod
+ def insert_data(cls, connection):
+ Director, Movie = cls.classes("Director", "Movie")
+ s = Session(connection)
+ s.add_all([Director(movies=[Movie(title="m1"), Movie(title="m2")])])
+ s.commit()
+
def test_from_subclass(self):
Director = self.classes.Director
s = create_session()
- ctx = s.query(Director).options(subqueryload("*"))._compile_context()
-
- q = ctx.attributes[
- (
- "subqueryload_data",
- (inspect(Director), inspect(Director).attrs.movies),
- )
- ]["query"]
- self.assert_compile(
- q,
- "SELECT movie.id AS movie_id, "
- "persistent.id AS persistent_id, "
- "movie.director_id AS movie_director_id, "
- "movie.title AS movie_title, "
- "anon_1.director_id AS anon_1_director_id "
- "FROM (SELECT director.id AS director_id "
- "FROM persistent JOIN director "
- "ON persistent.id = director.id) AS anon_1 "
- "JOIN (persistent JOIN movie "
- "ON persistent.id = movie.id) "
- "ON anon_1.director_id = movie.director_id",
- dialect="default",
+ with self.sql_execution_asserter(testing.db) as asserter:
+ s.query(Director).options(subqueryload("*")).all()
+ asserter.assert_(
+ CompiledSQL(
+ "SELECT director.id AS director_id, "
+ "persistent.id AS persistent_id, director.name "
+ "AS director_name FROM persistent JOIN director "
+ "ON persistent.id = director.id"
+ ),
+ CompiledSQL(
+ "SELECT movie.id AS movie_id, "
+ "persistent.id AS persistent_id, "
+ "movie.director_id AS movie_director_id, "
+ "movie.title AS movie_title, "
+ "anon_1.director_id AS anon_1_director_id "
+ "FROM (SELECT director.id AS director_id "
+ "FROM persistent JOIN director "
+ "ON persistent.id = director.id) AS anon_1 "
+ "JOIN (persistent JOIN movie "
+ "ON persistent.id = movie.id) "
+ "ON anon_1.director_id = movie.director_id",
+ ),
)
def test_integrate(self):
)
path = Column(String(255))
director_id = Column(Integer, ForeignKey("director.id"))
- director = relationship(Director, backref="photos")
+ director = relationship(
+ Director, backref=backref("photos", order_by=id)
+ )
class Movie(Base):
__tablename__ = "movie"
s = create_session(testing.db)
- q = s.query(Movie).options(
- subqueryload(Movie.director).subqueryload(Director.photos)
- )
- ctx = q._compile_context()
-
- q2 = ctx.attributes[
- (
- "subqueryload_data",
- (inspect(Movie), inspect(Movie).attrs.director),
+ with self.sql_execution_asserter(testing.db) as asserter:
+ result = (
+ s.query(Movie)
+ .options(
+ subqueryload(Movie.director).subqueryload(Director.photos)
+ )
+ .all()
)
- ]["query"]
- self.assert_compile(
- q2,
- "SELECT director.id AS director_id, "
- "director.name AS director_name, "
- "anon_1.movie_director_id AS anon_1_movie_director_id "
- "FROM (SELECT%s movie.director_id AS movie_director_id "
- "FROM movie) AS anon_1 "
- "JOIN director ON director.id = anon_1.movie_director_id"
- % (" DISTINCT" if expect_distinct else ""),
+ asserter.assert_(
+ CompiledSQL(
+ "SELECT movie.id AS movie_id, movie.director_id "
+ "AS movie_director_id, movie.title AS movie_title FROM movie"
+ ),
+ CompiledSQL(
+ "SELECT director.id AS director_id, "
+ "director.name AS director_name, "
+ "anon_1.movie_director_id AS anon_1_movie_director_id "
+ "FROM (SELECT%s movie.director_id AS movie_director_id "
+ "FROM movie) AS anon_1 "
+ "JOIN director ON director.id = anon_1.movie_director_id"
+ % (" DISTINCT" if expect_distinct else ""),
+ ),
+ CompiledSQL(
+ "SELECT director_photo.id AS director_photo_id, "
+ "director_photo.path AS director_photo_path, "
+ "director_photo.director_id AS director_photo_director_id, "
+ "director_1.id AS director_1_id "
+ "FROM (SELECT%s movie.director_id AS movie_director_id "
+ "FROM movie) AS anon_1 "
+ "JOIN director AS director_1 "
+ "ON director_1.id = anon_1.movie_director_id "
+ "JOIN director_photo "
+ "ON director_1.id = director_photo.director_id "
+ "ORDER BY director_photo.id"
+ % (" DISTINCT" if expect_distinct else ""),
+ ),
)
- ctx2 = q2._compile_context()
- stmt = q2.statement
-
- result = s.connection().execute(stmt)
- rows = result.fetchall()
-
- if expect_distinct:
- eq_(rows, [(1, "Woody Allen", 1)])
- else:
- eq_(rows, [(1, "Woody Allen", 1), (1, "Woody Allen", 1)])
-
- q3 = ctx2.attributes[
- (
- "subqueryload_data",
- (inspect(Director), inspect(Director).attrs.photos),
- )
- ]["query"]
-
- self.assert_compile(
- q3,
- "SELECT director_photo.id AS director_photo_id, "
- "director_photo.path AS director_photo_path, "
- "director_photo.director_id AS director_photo_director_id, "
- "director_1.id AS director_1_id "
- "FROM (SELECT%s movie.director_id AS movie_director_id "
- "FROM movie) AS anon_1 "
- "JOIN director AS director_1 "
- "ON director_1.id = anon_1.movie_director_id "
- "JOIN director_photo "
- "ON director_1.id = director_photo.director_id"
- % (" DISTINCT" if expect_distinct else ""),
- )
-
- stmt = q3.statement
-
- result = s.connection().execute(stmt)
-
- rows = result.fetchall()
- if expect_distinct:
- eq_(
- set(tuple(t) for t in rows),
- set([(1, "/1.jpg", 1, 1), (2, "/2.jpg", 1, 1)]),
- )
- else:
- # oracle might not order the way we expect here
- eq_(
- set(tuple(t) for t in rows),
- set(
- [
- (1, "/1.jpg", 1, 1),
- (2, "/2.jpg", 1, 1),
- (1, "/1.jpg", 1, 1),
- (2, "/2.jpg", 1, 1),
- ]
- ),
- )
-
- movies = q.all() # noqa
-
+ eq_(
+ [
+ (
+ movie.title,
+ movie.director.name,
+ [photo.path for photo in movie.director.photos],
+ )
+ for movie in result
+ ],
+ [
+ ("Manhattan", "Woody Allen", ["/1.jpg", "/2.jpg"]),
+ ("Sweet and Lowdown", "Woody Allen", ["/1.jpg", "/2.jpg"]),
+ ],
+ )
# check number of persistent objects in session
eq_(len(list(s)), 5)
s = create_session(testing.db)
- q = s.query(Credit).options(
- subqueryload(Credit.movie).subqueryload(Movie.director)
+ with self.sql_execution_asserter(testing.db) as asserter:
+ result = (
+ s.query(Credit)
+ .options(
+ subqueryload(Credit.movie).subqueryload(Movie.director)
+ )
+ .all()
+ )
+ asserter.assert_(
+ CompiledSQL(
+ "SELECT credit.id AS credit_id, credit.movie_id AS "
+ "credit_movie_id FROM credit"
+ ),
+ CompiledSQL(
+ "SELECT movie.id AS movie_id, movie.director_id "
+ "AS movie_director_id, movie.title AS movie_title, "
+ "anon_1.credit_movie_id AS anon_1_credit_movie_id "
+ "FROM (SELECT DISTINCT credit.movie_id AS credit_movie_id "
+ "FROM credit) AS anon_1 JOIN movie ON movie.id = "
+ "anon_1.credit_movie_id"
+ ),
+ CompiledSQL(
+ "SELECT director.id AS director_id, director.name "
+ "AS director_name, movie_1.director_id AS movie_1_director_id "
+ "FROM (SELECT DISTINCT credit.movie_id AS credit_movie_id "
+ "FROM credit) AS anon_1 JOIN movie AS movie_1 ON "
+ "movie_1.id = anon_1.credit_movie_id JOIN director "
+ "ON director.id = movie_1.director_id"
+ ),
)
- ctx = q._compile_context()
-
- q2 = ctx.attributes[
- ("subqueryload_data", (inspect(Credit), Credit.movie.property))
- ]["query"]
- ctx2 = q2._compile_context()
- q3 = ctx2.attributes[
- ("subqueryload_data", (inspect(Movie), Movie.director.property))
- ]["query"]
-
- stmt = q3.statement
-
- result = s.connection().execute(stmt)
- eq_(result.fetchall(), [(1, "Woody Allen", 1), (1, "Woody Allen", 1)])
+ eq_(
+ [credit.movie.director.name for credit in result],
+ ["Woody Allen", "Woody Allen", "Woody Allen"],
+ )
class JoinedNoLoadConflictTest(fixtures.DeclarativeMappedTest):
neutron is currently dependent on this use case which means others
are too.
+ Additionally tests functionality related to #5836, where the subquery is
+ generated from the non-cached context.query rather than from
+ context.compile_state.select_statement. This way we pick up the current
+ parameters from the new statement being run, but it also means we have to
+ derive a new CompileState from that query in order to deal with the
+ correct entities.
+
"""
@classmethod
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
a = relationship("A")
+ ds = relationship("D", order_by="D.id")
class C(Base, ComparableEntity):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
+ class D(Base, ComparableEntity):
+ __tablename__ = "d"
+ id = Column(Integer, primary_key=True)
+ b_id = Column(ForeignKey("b.id"))
+
@classmethod
def insert_data(cls, connection):
- A, B, C = cls.classes("A", "B", "C")
+ A, B, C, D = cls.classes("A", "B", "C", "D")
s = Session(connection)
- as_ = [A(id=i, cs=[C(), C()]) for i in range(1, 5)]
+ as_ = [A(id=i, cs=[C(), C()],) for i in range(1, 5)]
- s.add_all([B(a=as_[0]), B(a=as_[1]), B(a=as_[2]), B(a=as_[3])])
+ s.add_all(
+ [
+ B(a=as_[0], ds=[D()]),
+ B(a=as_[1], ds=[D()]),
+ B(a=as_[2]),
+ B(a=as_[3]),
+ ]
+ )
s.commit()
- def test_subqload_w_from_self(self):
+ def test_subq_w_from_self_one(self):
A, B, C = self.classes("A", "B", "C")
s = Session()
- q = (
- s.query(B)
- .join(B.a)
- .filter(B.id < 4)
- .filter(A.id > 1)
- .from_self()
- .options(subqueryload(B.a).subqueryload(A.cs))
- .from_self()
- )
+ cache = {}
+
+ for i in range(3):
+ q = (
+ s.query(B)
+ .execution_options(compiled_cache=cache)
+ .join(B.a)
+ .filter(B.id < 4)
+ .filter(A.id > 1)
+ .from_self()
+ .options(subqueryload(B.a).subqueryload(A.cs))
+ .from_self()
+ )
- def go():
- results = q.all()
- eq_(
- results,
- [
- B(
- a=A(cs=[C(a_id=2, id=3), C(a_id=2, id=4)], id=2),
- a_id=2,
- id=2,
- ),
- B(
- a=A(cs=[C(a_id=3, id=5), C(a_id=3, id=6)], id=3),
- a_id=3,
- id=3,
- ),
- ],
+ def go():
+ results = q.all()
+ eq_(
+ results,
+ [
+ B(
+ a=A(cs=[C(a_id=2, id=3), C(a_id=2, id=4)], id=2),
+ a_id=2,
+ id=2,
+ ),
+ B(
+ a=A(cs=[C(a_id=3, id=5), C(a_id=3, id=6)], id=3),
+ a_id=3,
+ id=3,
+ ),
+ ],
+ )
+
+ self.assert_sql_execution(
+ testing.db,
+ go,
+ CompiledSQL(
+ "SELECT anon_1.anon_2_b_id AS anon_1_anon_2_b_id, "
+ "anon_1.anon_2_b_a_id AS anon_1_anon_2_b_a_id FROM "
+ "(SELECT anon_2.b_id AS anon_2_b_id, anon_2.b_a_id "
+ "AS anon_2_b_a_id FROM (SELECT b.id AS b_id, b.a_id "
+ "AS b_a_id FROM b JOIN a ON a.id = b.a_id "
+ "WHERE b.id < :id_1 AND a.id > :id_2) AS anon_2) AS anon_1"
+ ),
+ CompiledSQL(
+ "SELECT a.id AS a_id, anon_1.anon_2_anon_3_b_a_id AS "
+ "anon_1_anon_2_anon_3_b_a_id FROM (SELECT DISTINCT "
+ "anon_2.anon_3_b_a_id AS anon_2_anon_3_b_a_id FROM "
+ "(SELECT anon_3.b_id AS anon_3_b_id, anon_3.b_a_id "
+ "AS anon_3_b_a_id FROM (SELECT b.id AS b_id, b.a_id "
+ "AS b_a_id FROM b JOIN a ON a.id = b.a_id "
+ "WHERE b.id < :id_1 AND a.id > :id_2) AS anon_3) "
+ "AS anon_2) AS anon_1 JOIN a "
+ "ON a.id = anon_1.anon_2_anon_3_b_a_id"
+ ),
+ CompiledSQL(
+ "SELECT c.id AS c_id, c.a_id AS c_a_id, a_1.id "
+ "AS a_1_id FROM (SELECT DISTINCT anon_2.anon_3_b_a_id AS "
+ "anon_2_anon_3_b_a_id FROM "
+ "(SELECT anon_3.b_id AS anon_3_b_id, anon_3.b_a_id "
+ "AS anon_3_b_a_id FROM (SELECT b.id AS b_id, b.a_id "
+ "AS b_a_id FROM b JOIN a ON a.id = b.a_id "
+ "WHERE b.id < :id_1 AND a.id > :id_2) AS anon_3) "
+ "AS anon_2) AS anon_1 JOIN a AS a_1 ON a_1.id = "
+ "anon_1.anon_2_anon_3_b_a_id JOIN c ON a_1.id = c.a_id "
+ "ORDER BY c.id"
+ ),
)
- self.assert_sql_count(testing.db, go, 3)
+ s.close()
+
+ def test_subq_w_from_self_two(self):
+
+ A, B, C = self.classes("A", "B", "C")
+
+ s = Session()
+ cache = {}
+
+ for i in range(3):
+
+ def go():
+ q = (
+ s.query(B)
+ .execution_options(compiled_cache=cache)
+ .join(B.a)
+ .from_self()
+ )
+ q = q.options(subqueryload(B.ds))
+
+ q.all()
+
+ self.assert_sql_execution(
+ testing.db,
+ go,
+ CompiledSQL(
+ "SELECT anon_1.b_id AS anon_1_b_id, anon_1.b_a_id AS "
+ "anon_1_b_a_id FROM (SELECT b.id AS b_id, b.a_id "
+ "AS b_a_id FROM b JOIN a ON a.id = b.a_id) AS anon_1"
+ ),
+ CompiledSQL(
+ "SELECT d.id AS d_id, d.b_id AS d_b_id, "
+ "anon_1.anon_2_b_id AS anon_1_anon_2_b_id "
+ "FROM (SELECT anon_2.b_id AS anon_2_b_id FROM "
+ "(SELECT b.id AS b_id, b.a_id AS b_a_id FROM b "
+ "JOIN a ON a.id = b.a_id) AS anon_2) AS anon_1 "
+ "JOIN d ON anon_1.anon_2_b_id = d.b_id ORDER BY d.id"
+ ),
+ )
+ s.close()
s = Session()
assert_raises_message(
- exc.InvalidRequestError,
- "Invalid expression type: 5",
+ exc.ArgumentError,
+ "SET/VALUES column expression or string key expected, got .*Thing",
s.query(User).update,
{Thing(): "moonbeam"},
synchronize_session="evaluate",
os.remove("orm2010.db")
# use a file based database so that cursor.execute() has some
# palpable overhead.
-engine = create_engine("sqlite:///orm2010.db", query_cache_size=100)
+engine = create_engine("sqlite:///orm2010.db")
Base.metadata.create_all(engine)
def status(msg):
print("%d - %s" % (time.time() - now, msg))
- runit_persist(status, 10)
+ runit_persist(status, factor)
print("Total time: %d" % (time.time() - now))
- runit_query_runs(status, 10)
+ runit_query_runs(status, factor)
print("Total time: %d" % (time.time() - now))
from sqlalchemy import extract
from sqlalchemy import Float
from sqlalchemy import Integer
+from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy.sql.elements import _label_reference
from sqlalchemy.sql.elements import _textual_label_reference
from sqlalchemy.sql.elements import Annotated
+from sqlalchemy.sql.elements import BindParameter
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql.elements import ClauseList
from sqlalchemy.sql.elements import CollationClause
from sqlalchemy.testing import is_true
from sqlalchemy.testing import ne_
from sqlalchemy.testing.util import random_choices
+from sqlalchemy.types import ARRAY
+from sqlalchemy.types import JSON
from sqlalchemy.util import class_hierarchy
meta = MetaData()
column("q").like("somstr", escape="\\"),
column("q").like("somstr", escape="X"),
),
+ lambda: (
+ column("q", ARRAY(Integer))[3] == 5,
+ column("q", ARRAY(Integer))[3:5] == 5,
+ ),
lambda: (
table_a.c.a,
table_a.c.a._annotate({"orm": True}),
cast(column("q"), Float),
cast(column("p"), Integer),
),
+ lambda: (
+ column("x", JSON)["key1"],
+ column("x", JSON)["key1"].as_boolean(),
+ column("x", JSON)["key1"].as_float(),
+ column("x", JSON)["key1"].as_integer(),
+ column("x", JSON)["key1"].as_string(),
+ column("y", JSON)["key1"].as_integer(),
+ column("y", JSON)["key1"].as_string(),
+ ),
lambda: (
bindparam("x"),
bindparam("y"),
select([table_a.c.a]),
select([table_a.c.a, table_a.c.b]),
select([table_a.c.b, table_a.c.a]),
+ select([table_a.c.b, table_a.c.a]).limit(5),
+ select([table_a.c.b, table_a.c.a]).limit(5).offset(10),
+ select([table_a.c.b, table_a.c.a])
+ .limit(literal_column("foobar"))
+ .offset(10),
select([table_a.c.b, table_a.c.a]).apply_labels(),
select([table_a.c.a]).where(table_a.c.b == 5),
select([table_a.c.a])
ne_(a_key.key, b_key.key)
# ClauseElement-specific test to ensure the cache key
- # collected all the bound parameters
+ # collected all the bound parameters that aren't marked
+ # as "literal execute"
if isinstance(case_a[a], ClauseElement) and isinstance(
case_b[b], ClauseElement
):
assert_a_params = []
assert_b_params = []
- visitors.traverse(
- case_a[a], {}, {"bindparam": assert_a_params.append}
- )
- visitors.traverse(
- case_b[b], {}, {"bindparam": assert_b_params.append}
- )
+
+ for elem in visitors.iterate(case_a[a]):
+ if elem.__visit_name__ == "bindparam":
+ assert_a_params.append(elem)
+
+ for elem in visitors.iterate(case_b[b]):
+ if elem.__visit_name__ == "bindparam":
+ assert_b_params.append(elem)
# note we're asserting the order of the params as well as
# if there are dupes or not. ordering has to be
for fixture in fixtures_:
self._run_cache_key_fixture(fixture, compare_values)
+ def test_literal_binds(self):
+ def fixture():
+ return (
+ bindparam(None, value="x", literal_execute=True),
+ bindparam(None, value="y", literal_execute=True),
+ )
+
+ self._run_cache_key_fixture(
+ fixture, True,
+ )
+
+ def test_bindparam_subclass_nocache(self):
+ # does not implement inherit_cache
+ class _literal_bindparam(BindParameter):
+ pass
+
+ l1 = _literal_bindparam(None, value="x1")
+ is_(l1._generate_cache_key(), None)
+
+ def test_bindparam_subclass_ok_cache(self):
+ # implements inherit_cache
+ class _literal_bindparam(BindParameter):
+ inherit_cache = True
+
+ def fixture():
+ return (
+ _literal_bindparam(None, value="x1"),
+ _literal_bindparam(None, value="x2"),
+ _literal_bindparam(None),
+ )
+
+ self._run_cache_key_fixture(fixture, True)
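+
+ def test_bindparam_subclass_cache_sketch(self):
+ # minimal illustrative sketch of the contract exercised by the two
+ # tests above: a subclass that sets inherit_cache = True yields a
+ # usable cache key, while the nocache variant returns None
+ class _cached_bindparam(BindParameter):
+ inherit_cache = True
+
+ p1 = _cached_bindparam(None, value="x1")
+ ne_(p1._generate_cache_key(), None)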
+
def test_cache_key_unknown_traverse(self):
class Foobar1(ClauseElement):
_traverse_internals = [
is_false(l1.compare(l2))
+ def test_cache_key_limit_offset_values(self):
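+ # integer LIMIT/OFFSET values are rendered as bound parameters, so
+ # they don't change the cache key; adding or removing a clause, or
+ # using a literal_column() for LIMIT, does change it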
+ s1 = select([column("q")]).limit(10)
+ s2 = select([column("q")]).limit(25)
+ s3 = select([column("q")]).limit(25).offset(5)
+ s4 = select([column("q")]).limit(25).offset(18)
+ s5 = select([column("q")]).limit(7).offset(12)
+ s6 = select([column("q")]).limit(literal_column("q")).offset(12)
+
+ for should_eq_left, should_eq_right in [(s1, s2), (s3, s4), (s3, s5)]:
+ eq_(
+ should_eq_left._generate_cache_key().key,
+ should_eq_right._generate_cache_key().key,
+ )
+
+ for shouldnt_eq_left, shouldnt_eq_right in [
+ (s1, s3),
+ (s5, s6),
+ (s2, s3),
+ ]:
+ ne_(
+ shouldnt_eq_left._generate_cache_key().key,
+ shouldnt_eq_right._generate_cache_key().key,
+ )
+
def test_compare_labels(self):
is_true(column("q").label(None).compare(column("q").label(None)))
from sqlalchemy.sql import label
from sqlalchemy.sql import operators
from sqlalchemy.sql import table
+from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.elements import BooleanClauseList
from sqlalchemy.sql.expression import ClauseList
from sqlalchemy.sql.expression import HasPrefixes
from sqlalchemy.testing import eq_ignore_whitespace
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
+from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
+from sqlalchemy.testing import ne_
from sqlalchemy.util import u
table1 = table(
extracted_parameters=s1_cache_key[1],
)
+ def test_construct_params_w_bind_clones_post(self):
+ """test that a BindParameter that has been cloned after the cache
+ key was generated still matches up when construct_params()
+ is called with an extracted parameter collection.
+
+ This case now occurs with the ORM, as ORM construction will
+ frequently run clause adaptation on elements of the statement within
+ compilation, after the cache key has been generated. This adaptation
+ hits BindParameter objects, which will change their key since they
+ usually have unique=True. So when the construct_params() process
+ links its internal bind_names to the cache key binds, it must do so
+ based on bindparam._identifying_key, which does not change across
+ clones, rather than on .key, which usually will.
+
+ """
+
+ stmt = select([table1.c.myid]).where(table1.c.myid == 5)
+
+ # get the original bindparam.
+ original_bind = stmt._where_criteria[0].right
+
+ # it's anonymous so unique=True
+ is_true(original_bind.unique)
+
+ # cache key against the original param
+ cache_key = stmt._generate_cache_key()
+
+ # now adapt the statement
+ stmt_adapted = sql_util.ClauseAdapter(table1).traverse(stmt)
+
+ # new bind parameter has a different key but same
+ # identifying key
+ new_bind = stmt_adapted._where_criteria[0].right
+ eq_(original_bind._identifying_key, new_bind._identifying_key)
+ ne_(original_bind.key, new_bind.key)
+
+ # compile the adapted statement, but set the cache key to the one
+ # generated from the unadapted statement. this mimics the ORM
+ # running clause adaptation inside of visit_select, after the cache
+ # key is generated but before the compiler is given the core select
+ # statement to actually render.
+ compiled = stmt_adapted.compile(cache_key=cache_key)
+
+ # params set up as 5
+ eq_(compiled.construct_params(params={},), {"myid_1": 5})
+
+ # also works with the original cache key
+ eq_(
+ compiled.construct_params(
+ params={}, extracted_parameters=cache_key[1]
+ ),
+ {"myid_1": 5},
+ )
+
+ # now make a totally new statement with the same cache key
+ new_stmt = select([table1.c.myid]).where(table1.c.myid == 10)
+ new_cache_key = new_stmt._generate_cache_key()
+
+ # cache keys match
+ eq_(cache_key.key, new_cache_key.key)
+
+ # ensure we get "10" from construct params. if it matched
+ # based on .key and not ._identifying_key, it would not see that
+ # the bind parameter is part of the cache key.
+ eq_(
+ compiled.construct_params(
+ params={}, extracted_parameters=new_cache_key[1]
+ ),
+ {"myid_1": 10},
+ )
+
+ def test_construct_params_w_bind_clones_pre(self):
+ """test that a BindParameter that has been cloned before the cache
+ key was generated, and was doubled up just to make sure it has to
+ be unique, still matches up when construct_params()
+ is called with an extracted parameter collection.
+
+ Other ORM features like optimized_compare() end up doing something
+ like this, such as when there are multiple "has()" or "any()" calls
+ that clone the join condition and change the values of the bound
+ parameters.
+
+ """
+
+ stmt = select([table1.c.myid]).where(table1.c.myid == 5)
+
+ original_bind = stmt._where_criteria[0].right
+ # it's anonymous so unique=True
+ is_true(original_bind.unique)
+
+ b1 = original_bind._clone()
+ b1.value = 10
+ b2 = original_bind._clone()
+ b2.value = 12
+
+ # make a new statement that uses the clones as distinct
+ # parameters
+ modified_stmt = select([table1.c.myid]).where(
+ or_(table1.c.myid == b1, table1.c.myid == b2)
+ )
+
+ cache_key = modified_stmt._generate_cache_key()
+ compiled = modified_stmt.compile(cache_key=cache_key)
+
+ eq_(
+ compiled.construct_params(params={}), {"myid_1": 10, "myid_2": 12},
+ )
+
+ # make a new statement doing the same thing and make sure
+ # the binds match up correctly
+ new_stmt = select([table1.c.myid]).where(table1.c.myid == 8)
+
+ new_original_bind = new_stmt._where_criteria[0].right
+ new_b1 = new_original_bind._clone()
+ new_b1.value = 20
+ new_b2 = new_original_bind._clone()
+ new_b2.value = 18
+ modified_new_stmt = select([table1.c.myid]).where(
+ or_(table1.c.myid == new_b1, table1.c.myid == new_b2)
+ )
+
+ new_cache_key = modified_new_stmt._generate_cache_key()
+
+ # cache keys match
+ eq_(cache_key.key, new_cache_key.key)
+
+ # ensure we get both values
+ eq_(
+ compiled.construct_params(
+ params={}, extracted_parameters=new_cache_key[1]
+ ),
+ {"myid_1": 20, "myid_2": 18},
+ )
+
def test_tuple_expanding_in_no_values(self):
expr = tuple_(table1.c.myid, table1.c.name).in_(
[(1, "foo"), (5, "bar")]
def test_operate(self, operator, right):
left = column("left")
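+ # comparison operators (e.g. ==, >, IN) produce a Boolean-typed
+ # BinaryExpression; other operators leave the type as the untyped
+ # default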
+ if operators.is_comparison(operator):
+ type_ = sqltypes.BOOLEANTYPE
+ else:
+ type_ = sqltypes.NULLTYPE
+
assert left.comparator.operate(operator, right).compare(
BinaryExpression(
coercions.expect(roles.WhereHavingRole, left),
coercions.expect(roles.WhereHavingRole, right),
operator,
+ type_=type_,
)
)
coercions.expect(roles.WhereHavingRole, right),
operator,
modifiers=modifiers,
+ type_=type_,
)
)
"left", value=[1, 2, 3], unique=True, expanding=True
),
operators.in_op,
+ type_=sqltypes.BOOLEANTYPE,
)
)
self._loop_test(operators.in_op, [1, 2, 3])
"left", value=[1, 2, 3], unique=True, expanding=True
),
operators.notin_op,
+ type_=sqltypes.BOOLEANTYPE,
)
)
self._loop_test(operators.notin_op, [1, 2, 3])
def test_update_literal_binds(self):
table1 = self.tables.mytable
+ stmt = (
+ table1.update().values(name="jack").where(table1.c.name == "jill")
+ )
+
+ self.assert_compile(
+ stmt,
+ "UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
+ literal_binds=True,
+ )
+
+ def test_update_custom_key_thing(self):
table1 = self.tables.mytable
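+ # an object providing __clause_element__() is coerced to the column
+ # it returns, so it can act as the key in a SET/VALUES dictionary;
+ # if __clause_element__() doesn't resolve to a column expression,
+ # ArgumentError is raised (see the "broken" variants below)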
+ class Thing(object):
+ def __clause_element__(self):
+ return table1.c.name
+
stmt = (
- table1.update().values(name="jack").where(table1.c.name == "jill")
+ table1.update()
+ .values({Thing(): "jack"})
+ .where(table1.c.name == "jill")
+ )
+
+ self.assert_compile(
+ stmt,
+ "UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'",
+ literal_binds=True,
+ )
+
+ def test_update_ordered_custom_key_thing(self):
+ table1 = self.tables.mytable
+
+ class Thing(object):
+ def __clause_element__(self):
+ return table1.c.name
+
+ stmt = (
+ table1.update()
+ .ordered_values((Thing(), "jack"))
+ .where(table1.c.name == "jill")
)
self.assert_compile(
literal_binds=True,
)
+ def test_update_broken_custom_key_thing(self):
+ table1 = self.tables.mytable
+
+ class Thing(object):
+ def __clause_element__(self):
+ return 5
+
+ assert_raises_message(
+ exc.ArgumentError,
+ "SET/VALUES column expression or string key expected, got .*Thing",
+ table1.update().values,
+ {Thing(): "jack"},
+ )
+
+ def test_update_ordered_broken_custom_key_thing(self):
+ table1 = self.tables.mytable
+
+ class Thing(object):
+ def __clause_element__(self):
+ return 5
+
+ assert_raises_message(
+ exc.ArgumentError,
+ "SET/VALUES column expression or string key expected, got .*Thing",
+ table1.update().ordered_values,
+ (Thing(), "jack"),
+ )
+
def test_correlated_update_one(self):
table1 = self.tables.mytable