to work for subclasses, if they are present, for
example::
- sess.query(Company).options(
- eagerload_all(
- ))
+ sess.query(Company).options(eagerload_all())
to load Company objects, their employees, and the
'machines' collection of employees who happen to be
del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5)
- upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed')
+ upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name="ed")
.. change::
:tags: bug, orm
to the original, older use case for :meth:`_query.Query.select_from`, which is that
of restating the mapped entity in terms of a different selectable::
- session.query(User.name).\
- select_from(user_table.select().where(user_table.c.id > 5))
+ session.query(User.name).select_from(user_table.select().where(user_table.c.id > 5))
Which produces::
original. Allows symmetry when using :class:`_engine.Engine` and
:class:`_engine.Connection` objects as context managers::
- with conn.connect() as c: # leaves the Connection open
- c.execute("...")
+ with conn.connect() as c: # leaves the Connection open
+ c.execute("...")
with engine.connect() as c: # closes the Connection
- c.execute("...")
+ c.execute("...")
.. change::
:tags: engine
ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection,
after construction::
- idx = Index('a', 'b')
- idx.kwargs['mysql_someargument'] = True
+ idx = Index("a", "b")
+ idx.kwargs["mysql_someargument"] = True
To suit the use case of allowing custom arguments at construction time,
the :meth:`.DialectKWArgs.argument_for` method now allows this registration::
- Index.argument_for('mysql', 'someargument', False)
+ Index.argument_for("mysql", "someargument", False)
- idx = Index('a', 'b', mysql_someargument=True)
+ idx = Index("a", "b", mysql_someargument=True)
.. seealso::
::
- myengine = create_engine('sqlite://')
+ myengine = create_engine("sqlite://")
meta = MetaData(myengine)
from sqlalchemy import *
+
class UTCDateTime(types.TypeDecorator):
pass
from sqlalchemy import *
from sqlalchemy import types
+
class UTCDateTime(types.TypeDecorator):
pass
::
- session.query(User).filter(and_(User.name == 'fred', User.id > 17))
+ session.query(User).filter(and_(User.name == "fred", User.id > 17))
While simple column-based comparisons are no big deal, the
class attributes have some new "higher level" constructs
# return all users who contain a particular address with
# the email_address like '%foo%'
- filter(User.addresses.any(Address.email_address.like('%foo%')))
+ filter(User.addresses.any(Address.email_address.like("%foo%")))
# same, email address equals 'foo@bar.com'. can fall back to keyword
# args for simple comparisons
- filter(User.addresses.any(email_address = 'foo@bar.com'))
+ filter(User.addresses.any(email_address="foo@bar.com"))
# return all Addresses whose user attribute has the username 'ed'
- filter(Address.user.has(name='ed'))
+ filter(Address.user.has(name="ed"))
# return all Addresses whose user attribute has the username 'ed'
# and an id > 5 (mixing clauses with kwargs)
- filter(Address.user.has(User.id > 5, name='ed'))
+ filter(Address.user.has(User.id > 5, name="ed"))
The ``Column`` collection remains available on mapped
classes in the ``.c`` attribute. Note that property-based
::
# standard self-referential TreeNode mapper with backref
- mapper(TreeNode, tree_nodes, properties={
- 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id))
- })
+ mapper(
+ TreeNode,
+ tree_nodes,
+ properties={
+ "children": relation(
+ TreeNode, backref=backref("parent", remote_side=tree_nodes.id)
+ )
+ },
+ )
# query for node with child containing "bar" two levels deep
- session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar')
+ session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(
+ name="bar"
+ )
To add criterion for each table along the way in an aliased
join, you can use ``from_joinpoint`` to keep joining against
# search for the treenode along the path "n1/n12/n122"
# first find a Node with name="n122"
- q = sess.query(Node).filter_by(name='n122')
+ q = sess.query(Node).filter_by(name="n122")
# then join to parent with "n12"
- q = q.join('parent', aliased=True).filter_by(name='n12')
+ q = q.join("parent", aliased=True).filter_by(name="n12")
# join again to the next parent with 'n1'. use 'from_joinpoint'
# so we join from the previous point, instead of joining off the
# root table
- q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1')
+ q = q.join("parent", aliased=True, from_joinpoint=True).filter_by(name="n1")
node = q.first()
::
- nodes = Table('nodes', metadata,
- Column('id', Integer, primary_key=True),
- Column('parent_id', Integer, ForeignKey('nodes.id')),
- Column('name', String(30)))
+ nodes = Table(
+ "nodes",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("parent_id", Integer, ForeignKey("nodes.id")),
+ Column("name", String(30)),
+ )
+
class TreeNode(object):
pass
- mapper(TreeNode, nodes, properties={
- 'children':relation(TreeNode, lazy=False, join_depth=3)
- })
+
+ mapper(
+ TreeNode,
+ nodes,
+ properties={"children": relation(TreeNode, lazy=False, join_depth=3)},
+ )
So what happens when we say:
def __init__(self, x, y):
self.x = x
self.y = y
+
def __composite_values__(self):
return self.x, self.y
+
def __eq__(self, other):
return other.x == self.x and other.y == self.y
+
def __ne__(self, other):
return not self.__eq__(other)
::
- vertices = Table('vertices', metadata,
- Column('id', Integer, primary_key=True),
- Column('x1', Integer),
- Column('y1', Integer),
- Column('x2', Integer),
- Column('y2', Integer),
- )
+ vertices = Table(
+ "vertices",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("x1", Integer),
+ Column("y1", Integer),
+ Column("x2", Integer),
+ Column("y2", Integer),
+ )
Then, map it! We'll create a ``Vertex`` object which
stores two ``Point`` objects:
self.start = start
self.end = end
- mapper(Vertex, vertices, properties={
- 'start':composite(Point, vertices.c.x1, vertices.c.y1),
- 'end':composite(Point, vertices.c.x2, vertices.c.y2)
- })
+
+ mapper(
+ Vertex,
+ vertices,
+ properties={
+ "start": composite(Point, vertices.c.x1, vertices.c.y1),
+ "end": composite(Point, vertices.c.x2, vertices.c.y2),
+ },
+ )
Once you've set up your composite type, it's usable just
like any other type:
::
- v = Vertex(Point(3, 4), Point(26,15))
+ v = Vertex(Point(3, 4), Point(26, 15))
session.save(v)
session.flush()
# a Document class which uses a composite Version
# object as primary key
- document = query.get(Version(1, 'a'))
+ document = query.get(Version(1, "a"))
``dynamic_loader()`` relations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
- mapper(Foo, foo_table, properties={
- 'bar':relation(Bar)
- })
- mapper(Bar, bar_table, properties={
- 'bat':relation(Bat)
- })
+ mapper(Foo, foo_table, properties={"bar": relation(Bar)})
+ mapper(Bar, bar_table, properties={"bat": relation(Bat)})
mapper(Bat, bat_table)
# eager load bar and bat
- session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all()
+ session.query(Foo).options(eagerload_all("bar.bat")).filter(...).all()
New Collection API
^^^^^^^^^^^^^^^^^^
# use a dictionary relation keyed by a column
relation(Item, collection_class=column_mapped_collection(items.c.keyword))
# or named attribute
- relation(Item, collection_class=attribute_mapped_collection('keyword'))
+ relation(Item, collection_class=attribute_mapped_collection("keyword"))
# or any function you like
relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b))
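Assuming a hypothetical ``Parent``/``Item`` mapping whose ``items`` collection
uses ``attribute_mapped_collection("keyword")`` as above, the collection then
behaves like a plain dictionary keyed on each ``Item.keyword``; a minimal
sketch::

    item = Item(keyword="red")
    parent.items["red"] = item  # dict-style assignment and lookup
    assert parent.items["red"] is item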
::
- mapper(User, users, properties={
- 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')),
- 'numposts': column_property(
- select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts')
- )
- })
+ mapper(
+ User,
+ users,
+ properties={
+ "fullname": column_property(
+ (users.c.firstname + users.c.lastname).label("fullname")
+ ),
+ "numposts": column_property(
+ select([func.count(1)], users.c.id == posts.c.user_id)
+ .correlate(users)
+ .label("posts")
+ ),
+ },
+ )
a typical query looks like:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
- engine = create_engine('myengine://')
+ engine = create_engine("myengine://")
Session = sessionmaker(bind=engine, autoflush=True, transactional=True)
# use the new Session() freely
sess.save(someobject)
sess.flush()
-
If you need to post-configure your Session, say with an
engine, add it later with ``configure()``:
Session = scoped_session(sessionmaker(autoflush=True, transactional=True))
Session.configure(bind=engine)
- u = User(name='wendy')
+ u = User(name="wendy")
sess = Session()
sess.save(u)
sess2 = Session()
assert sess is sess2
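When the scope ends (for example, at the end of a web request), the
thread-local registry is typically torn down with ``remove()``; a minimal
sketch using the ``Session`` registry above::

    # discard the current thread's session; the next Session() call
    # starts fresh
    Session.remove()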
-
When using a thread-local ``Session``, the returned class
has all of ``Session's`` interface implemented as
classmethods, and "assignmapper"'s functionality is
# "assignmapper"-like functionality available via ScopedSession.mapper
Session.mapper(User, users_table)
- u = User(name='wendy')
+ u = User(name="wendy")
Session.commit()
-
Sessions are again Weak Referencing By Default
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Session = sessionmaker(bind=engine, autoflush=True, transactional=True)
- u = User(name='wendy')
+ u = User(name="wendy")
sess = Session()
sess.save(u)
# wendy is flushed, comes right back from a query
- wendy = sess.query(User).filter_by(name='wendy').one()
+ wendy = sess.query(User).filter_by(name="wendy").one()
Transactional methods moved onto sessions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# use the session
- sess.commit() # commit transaction
+ sess.commit() # commit transaction
Sharing a ``Session`` with an enclosing engine-level (i.e.
non-ORM) transaction is easy:
::
- b = bindparam('foo', type_=String)
+ b = bindparam("foo", type_=String)
in\_ Function Changed to Accept Sequence or Selectable
------------------------------------------------------
::
- result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5)
- assert result.out_parameters == {'y':10, 'z':75}
+ result = engine.execute(
+ text(
+ "begin foo(:x, :y, :z); end;",
+ bindparams=[
+ bindparam("x", Numeric),
+ outparam("y", Numeric),
+ outparam("z", Numeric),
+ ],
+ ),
+ x=5,
+ )
+ assert result.out_parameters == {"y": 10, "z": 75}
Connection-bound ``MetaData``, ``Sessions``
-------------------------------------------
::
- session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name)
+ session.query(User.name, func.count(Address.id).label("numaddresses")).join(
+ Address
+ ).group_by(User.name)
The tuples returned by any multi-column/entity query are
*named* tuples:
::
- for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name):
- print("name", row.name, "number", row.numaddresses)
+ for row in (
+ session.query(User.name, func.count(Address.id).label("numaddresses"))
+ .join(Address)
+ .group_by(User.name)
+ ):
+ print("name", row.name, "number", row.numaddresses)
``Query`` has a ``statement`` accessor, as well as a
``subquery()`` method which allow ``Query`` to be used to
::
- subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery()
- recipes = session.query(Recipe).filter(exists().
- where(Recipe.id==recipe_keywords.c.recipe_id).
- where(recipe_keywords.c.keyword_id==subq.c.keyword_id)
+ subq = (
+ session.query(Keyword.id.label("keyword_id"))
+ .filter(Keyword.name.in_(["beans", "carrots"]))
+ .subquery()
+ )
+ recipes = session.query(Recipe).filter(
+ exists()
+ .where(Recipe.id == recipe_keywords.c.recipe_id)
+ .where(recipe_keywords.c.keyword_id == subq.c.keyword_id)
)
* **Explicit ORM aliases are recommended for aliased joins**
::
- mapper(User, users, properties={
- 'addresses':relation(Address, order_by=addresses.c.id)
- }, order_by=users.c.id)
+ mapper(
+ User,
+ users,
+ properties={"addresses": relation(Address, order_by=addresses.c.id)},
+ order_by=users.c.id,
+ )
To set ordering on a backref, use the ``backref()``
function:
::
- 'keywords':relation(Keyword, secondary=item_keywords,
- order_by=keywords.c.name, backref=backref('items', order_by=items.c.id))
+ "keywords": relation(
+ Keyword,
+ secondary=item_keywords,
+ order_by=keywords.c.name,
+ backref=backref("items", order_by=items.c.id),
+ )
Using declarative? To help with the new ``order_by``
requirement, ``order_by`` and friends can now be set using
class MyClass(MyDeclarativeBase):
...
- 'addresses':relation("Address", order_by="Address.id")
+ "addresses": relation("Address", order_by="Address.id")
It's generally a good idea to set ``order_by`` on
``relation()s`` which load list-based collections of
convert_result_value methods
"""
+
def bind_processor(self, dialect):
def convert(value):
return self.convert_bind_param(value, dialect)
+
return convert
def result_processor(self, dialect):
def convert(value):
return self.convert_result_value(value, dialect)
+
return convert
def convert_result_value(self, value, dialect):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec
# old way
- '2008-06-27 12:00:00.125'
+ "2008-06-27 12:00:00.125"
# new way
- '2008-06-27 12:00:00.000125'
+ "2008-06-27 12:00:00.000125"
So if an existing SQLite file-based database intends to be
used across 0.4 and 0.5, you either have to upgrade the
::
from sqlalchemy.databases.sqlite import DateTimeMixin
+
DateTimeMixin.__legacy_microseconds__ = True
Connection Pool no longer threadlocal by default
::
- query.join('orders', 'items')
+ query.join("orders", "items")
query.join(User.orders, Order.items)
* the ``in_()`` method on columns and similar only accepts a
::
from sqlalchemy.orm import aliased
+
address_alias = aliased(Address)
print(session.query(User, address_alias).join((address_alias, User.addresses)).all())
::
- create_engine('postgresql://scott:tiger@localhost/test')
+ create_engine("postgresql://scott:tiger@localhost/test")
However to specify a specific DBAPI backend such as pg8000,
add it to the "protocol" section of the URL using a plus
::
- create_engine('postgresql+pg8000://scott:tiger@localhost/test')
+ create_engine("postgresql+pg8000://scott:tiger@localhost/test")
Important Dialect Links:
::
- from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\
- VARCHAR, MACADDR, DATE, BYTEA
+ from sqlalchemy.dialects.postgresql import (
+ INTEGER,
+ BIGINT,
+ SMALLINT,
+ VARCHAR,
+ MACADDR,
+ DATE,
+ BYTEA,
+ )
Above, ``INTEGER`` is actually the plain ``INTEGER`` type
from ``sqlalchemy.types``, but the PG dialect makes it
::
>>> from sqlalchemy.sql import column
- >>> column('foo') == 5
+ >>> column("foo") == 5
<sqlalchemy.sql.expression._BinaryExpression object at 0x1252490>
This is so that Python expressions produce SQL expressions when
::
- >>> str(column('foo') == 5)
+ >>> str(column("foo") == 5)
'foo = :foo_1'
But what happens if we say this?
::
- >>> if column('foo') == 5:
+ >>> if column("foo") == 5:
... print("yes")
- ...
In previous versions of SQLAlchemy, the returned
``_BinaryExpression`` was a plain Python object which
::
- >>> bool(column('foo') == 5)
+ >>> bool(column("foo") == 5)
False
- >>> bool(column('foo') == column('foo'))
+ >>> bool(column("foo") == column("foo"))
False
- >>> c = column('foo')
+ >>> c = column("foo")
>>> bool(c == c)
True
>>>
::
- connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'})
+ connection.execute(table.insert(), {"data": "row1"}, {"data": "row2"}, {"data": "row3"})
When the ``Connection`` object sends off the given
``insert()`` construct for compilation, it passes to the
::
- connection.execute(table.insert(),
- {'timestamp':today, 'data':'row1'},
- {'timestamp':today, 'data':'row2'},
- {'data':'row3'})
+ connection.execute(
+ table.insert(),
+ {"timestamp": today, "data": "row1"},
+ {"timestamp": today, "data": "row2"},
+ {"data": "row3"},
+ )
Because the third row does not specify the 'timestamp'
column. Previous versions of SQLAlchemy would simply insert
from sqlalchemy.schema import DDL
- DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata)
+ DDL("CREATE TRIGGER users_trigger ...").execute_at("after-create", metadata)
Now the full suite of DDL constructs are available under the
same system, including those for CREATE TABLE, ADD
from sqlalchemy.schema import Constraint, AddConstraint
- AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable)
+ AddConstraint(CheckConstraint("value > 5")).execute_at("after-create", mytable)
Additionally, all the DDL objects are now regular
``ClauseElement`` objects just like any other SQLAlchemy
from sqlalchemy.schema import DDLElement
from sqlalchemy.ext.compiler import compiles
- class AlterColumn(DDLElement):
+ class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
+
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s %s ..." % (
element.column.table.name,
element.column.name,
- element.cmd
+ element.cmd,
)
+
engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'"))
Deprecated/Removed Schema Elements
::
from sqlalchemy.engine.reflection import Inspector
+
insp = Inspector.from_engine(my_engine)
print(insp.get_schema_names())
::
- my_engine = create_engine('postgresql://...')
+ my_engine = create_engine("postgresql://...")
pg_insp = Inspector.from_engine(my_engine)
- print(pg_insp.get_table_oid('my_table'))
+ print(pg_insp.get_table_oid("my_table"))
RETURNING Support
=================
result = connection.execute(
- table.insert().values(data='some data').returning(table.c.id, table.c.timestamp)
- )
+ table.insert().values(data="some data").returning(table.c.id, table.c.timestamp)
+ )
row = result.first()
- print("ID:", row['id'], "Timestamp:", row['timestamp'])
+ print("ID:", row["id"], "Timestamp:", row["timestamp"])
The implementation of RETURNING across the four supported
backends varies wildly, in the case of Oracle requiring an
def process_result_value(self, value, dialect):
if isinstance(value, unicode):
- value = value.encode('utf-8')
+ value = value.encode("utf-8")
return value
Note that the ``assert_unicode`` flag is now deprecated.
::
mapper(Child, child)
- mapper(Parent, parent, properties={
- 'child':relationship(Child, lazy='joined', innerjoin=True)
- })
+ mapper(
+ Parent,
+ parent,
+ properties={"child": relationship(Child, lazy="joined", innerjoin=True)},
+ )
At query time level:
::
- query.join(SomeClass, SomeClass.id==ParentClass.some_id)
+ query.join(SomeClass, SomeClass.id == ParentClass.some_id)
In 0.6, this usage was considered to be an error, because
``join()`` accepts multiple arguments corresponding to
::
- Table('mytable', metadata,
- Column('id',Integer, primary_key=True),
- Column('name', String(50), nullable=False),
- Index('idx_name', 'name')
+ Table(
+ "mytable",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50), nullable=False),
+ Index("idx_name", "name"),
)
The primary rationale here is for the benefit of declarative
::
class HasNameMixin(object):
- name = Column('name', String(50), nullable=False)
+ name = Column("name", String(50), nullable=False)
+
@declared_attr
def __table_args__(cls):
- return (Index('name'), {})
+ return (Index("name"), {})
+
class User(HasNameMixin, Base):
- __tablename__ = 'user'
- id = Column('id', Integer, primary_key=True)
+ __tablename__ = "user"
+ id = Column("id", Integer, primary_key=True)
`Indexes <https://www.sqlalchemy.org/docs/07/core/schema.html
#indexes>`_
from sqlalchemy.sql import table, column, select, func
- empsalary = table('empsalary',
- column('depname'),
- column('empno'),
- column('salary'))
+ empsalary = table("empsalary", column("depname"), column("empno"), column("salary"))
- s = select([
+ s = select(
+ [
empsalary,
- func.avg(empsalary.c.salary).
- over(partition_by=empsalary.c.depname).
- label('avg')
- ])
+ func.avg(empsalary.c.salary)
+ .over(partition_by=empsalary.c.depname)
+ .label("avg"),
+ ]
+ )
print(s)
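The statement above renders roughly as follows (a sketch; the exact
labeling may vary by version)::

    SELECT empsalary.depname, empsalary.empno, empsalary.salary,
           avg(empsalary.salary) OVER (PARTITION BY empsalary.depname) AS avg
    FROM empsalary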
::
- query.from_self(func.count(literal_column('1'))).scalar()
+ query.from_self(func.count(literal_column("1"))).scalar()
Previously, internal logic attempted to rewrite the columns
clause of the query itself, and upon detection of a
::
from sqlalchemy import func
+
session.query(func.count(MyClass.id)).scalar()
or for ``count(*)``:
::
from sqlalchemy import func, literal_column
- session.query(func.count(literal_column('*'))).select_from(MyClass).scalar()
+
+ session.query(func.count(literal_column("*"))).select_from(MyClass).scalar()
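The ``count(*)`` form above emits SQL roughly like the following, assuming
``MyClass`` is mapped to a table named ``mytable`` (a sketch; the anonymous
label may differ)::

    SELECT count(*) AS count_1 FROM mytable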
LIMIT/OFFSET clauses now use bind parameters
--------------------------------------------
from sqlalchemy import select, func
from sqlalchemy.orm import mapper
+
class Subset(object):
pass
+
+
selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias()
mapper(Subset, selectable, primary_key=[selectable.c.x])
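A hypothetical query against such a mapping then selects from the aliased
function expression like any other selectable::

    # filters apply to the columns of the aliased SELECT
    session.query(Subset).filter(Subset.x == 5).all()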
::
- Table('mytable', metadata,
+ Table(
+ "mytable",
+ metadata,
# ....
-
- Column('pickled_data', PickleType(mutable=True))
+ Column("pickled_data", PickleType(mutable=True)),
)
The ``mutable=True`` flag is being phased out, in favor of
::
- foobar = foo.join(bar, foo.c.id==bar.c.foo_id)
+ foobar = foo.join(bar, foo.c.id == bar.c.foo_id)
mapper(FooBar, foobar)
This is because the ``mapper()`` refuses to guess what column
::
- foobar = foo.join(bar, foo.c.id==bar.c.foo_id)
- mapper(FooBar, foobar, properties={
- 'id':[foo.c.id, bar.c.id]
- })
+ foobar = foo.join(bar, foo.c.id == bar.c.foo_id)
+ mapper(FooBar, foobar, properties={"id": [foo.c.id, bar.c.id]})
:ticket:`1896`
::
- select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY'])
+ select([mytable], distinct="ALL", prefixes=["HIGH_PRIORITY"])
The ``prefixes`` keyword or ``prefix_with()`` method should
be used for non-standard or unusual prefixes:
::
- select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL')
+ select([mytable]).prefix_with("HIGH_PRIORITY", "ALL")
``useexisting`` superseded by ``extend_existing`` and ``keep_existing``
-----------------------------------------------------------------------
class Parent(Base):
- __tablename__ = 'parent'
+ __tablename__ = "parent"
id = Column(Integer, primary_key=True)
- child_id_one = Column(Integer, ForeignKey('child.id'))
- child_id_two = Column(Integer, ForeignKey('child.id'))
+ child_id_one = Column(Integer, ForeignKey("child.id"))
+ child_id_two = Column(Integer, ForeignKey("child.id"))
child_one = relationship("Child", foreign_keys=child_id_one)
child_two = relationship("Child", foreign_keys=child_id_two)
+
class Child(Base):
- __tablename__ = 'child'
+ __tablename__ = "child"
id = Column(Integer, primary_key=True)
* relationships against self-referential, composite foreign
::
class Folder(Base):
- __tablename__ = 'folder'
+ __tablename__ = "folder"
__table_args__ = (
- ForeignKeyConstraint(
- ['account_id', 'parent_id'],
- ['folder.account_id', 'folder.folder_id']),
+ ForeignKeyConstraint(
+ ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"]
+ ),
)
account_id = Column(Integer, primary_key=True)
parent_id = Column(Integer)
name = Column(String)
- parent_folder = relationship("Folder",
- backref="child_folders",
- remote_side=[account_id, folder_id]
- )
+ parent_folder = relationship(
+ "Folder", backref="child_folders", remote_side=[account_id, folder_id]
+ )
Above, the ``Folder`` refers to its parent ``Folder``
joining from ``account_id`` to itself, and ``parent_id``
expected in most cases::
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = Column(Integer, primary_key=True)
ip_address = Column(INET)
content = Column(String(50))
# relationship() using explicit foreign_keys, remote_side
- parent_host = relationship("HostEntry",
- primaryjoin=ip_address == cast(content, INET),
- foreign_keys=content,
- remote_side=ip_address
- )
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=ip_address == cast(content, INET),
+ foreign_keys=content,
+ remote_side=ip_address,
+ )
The new :func:`_orm.relationship` mechanics make use of a
SQLAlchemy concept known as :term:`annotations`. These annotations
from sqlalchemy.orm import foreign, remote
+
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = Column(Integer, primary_key=True)
ip_address = Column(INET)
# relationship() using explicit foreign() and remote() annotations
# in lieu of separate arguments
- parent_host = relationship("HostEntry",
- primaryjoin=remote(ip_address) == \
- cast(foreign(content), INET),
- )
-
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=remote(ip_address) == cast(foreign(content), INET),
+ )
.. seealso::
A walkthrough of some key capabilities follows::
>>> class User(Base):
- ... __tablename__ = 'user'
+ ... __tablename__ = "user"
... id = Column(Integer, primary_key=True)
... name = Column(String)
... name_syn = synonym(name)
... addresses = relationship("Address")
- ...
>>> # universal entry point is inspect()
>>> b = inspect(User)
"user".id = address.user_id
>>> # inspect works on instances
- >>> u1 = User(id=3, name='x')
+ >>> u1 = User(id=3, name="x")
>>> b = inspect(u1)
>>> # it returns the InstanceState
::
from sqlalchemy.orm import with_polymorphic
+
palias = with_polymorphic(Person, [Engineer, Manager])
- session.query(Company).\
- join(palias, Company.employees).\
- filter(or_(Engineer.language=='java', Manager.hair=='pointy'))
+ session.query(Company).join(palias, Company.employees).filter(
+ or_(Engineer.language == "java", Manager.hair == "pointy")
+ )
.. seealso::
# use eager loading in conjunction with with_polymorphic targets
Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
- q = s.query(DataContainer).\
- join(DataContainer.jobs.of_type(Job_P)).\
- options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ q = (
+ s.query(DataContainer)
+ .join(DataContainer.jobs.of_type(Job_P))
+ .options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ )
The method now works equally well in most places a regular relationship
attribute is accepted, including with loader functions like
# use eager loading in conjunction with with_polymorphic targets
Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
- q = s.query(DataContainer).\
- join(DataContainer.jobs.of_type(Job_P)).\
- options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ q = (
+ s.query(DataContainer)
+ .join(DataContainer.jobs.of_type(Job_P))
+ .options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ )
# pass subclasses to eager loads (implicitly applies with_polymorphic)
- q = s.query(ParentThing).\
- options(
- joinedload_all(
- ParentThing.container,
- DataContainer.jobs.of_type(SubJob)
- ))
+ q = s.query(ParentThing).options(
+ joinedload_all(ParentThing.container, DataContainer.jobs.of_type(SubJob))
+ )
# control self-referential aliasing with any()/has()
Job_A = aliased(Job)
- q = s.query(Job).join(DataContainer.jobs).\
- filter(
- DataContainer.jobs.of_type(Job_A).\
- any(and_(Job_A.id < Job.id, Job_A.type=='fred')
- )
- )
+ q = (
+ s.query(Job)
+ .join(DataContainer.jobs)
+ .filter(
+ DataContainer.jobs.of_type(Job_A).any(
+ and_(Job_A.id < Job.id, Job_A.type == "fred")
+ )
+ )
+ )
.. seealso::
Base = declarative_base()
+
@event.listens_for("load", Base, propagate=True)
def on_load(target, context):
print("New instance loaded:", target)
+
# on_load() will be applied to SomeClass
class SomeClass(Base):
- __tablename__ = 'sometable'
+ __tablename__ = "sometable"
# ...
class Snack(Base):
# ...
- peanuts = relationship("nuts.Peanut",
- primaryjoin="nuts.Peanut.snack_id == Snack.id")
+ peanuts = relationship(
+ "nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id"
+ )
The resolution allows that any full or partial
disambiguating package name can be used. If the
class ReflectedOne(DeferredReflection, Base):
__abstract__ = True
+
class ReflectedTwo(DeferredReflection, Base):
__abstract__ = True
+
class MyClass(ReflectedOne):
- __tablename__ = 'mytable'
+ __tablename__ = "mytable"
+
class MyOtherClass(ReflectedOne):
- __tablename__ = 'myothertable'
+ __tablename__ = "myothertable"
+
class YetAnotherClass(ReflectedTwo):
- __tablename__ = 'yetanothertable'
+ __tablename__ = "yetanothertable"
+
ReflectedOne.prepare(engine_one)
ReflectedTwo.prepare(engine_two)
a FROM clause (or equivalent, depending on backend)
against ``SomeOtherEntity``::
- query(SomeEntity).\
- filter(SomeEntity.id==SomeOtherEntity.id).\
- filter(SomeOtherEntity.foo=='bar').\
- update({"data":"x"})
+ query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter(
+ SomeOtherEntity.foo == "bar"
+ ).update({"data": "x"})
In particular, updates to joined-inheritance
entities are supported, provided the target of the UPDATE is local to the
::
- query(Engineer).\
- filter(Person.id==Engineer.id).\
- filter(Person.name=='dilbert').\
- update({"engineer_data":"java"})
+ query(Engineer).filter(Person.id == Engineer.id).filter(
+ Person.name == "dilbert"
+ ).update({"engineer_data": "java"})
would produce:
from sqlalchemy.types import Numeric
from sqlalchemy.sql import func
+
class CustomNumeric(Numeric):
class comparator_factory(Numeric.Comparator):
def log(self, other):
::
- data = Table('data', metadata,
- Column('id', Integer, primary_key=True),
- Column('x', CustomNumeric(10, 5)),
- Column('y', CustomNumeric(10, 5))
- )
+ data = Table(
+ "data",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("x", CustomNumeric(10, 5)),
+ Column("y", CustomNumeric(10, 5)),
+ )
stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value)
print(conn.execute(stmt).fetchall())
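For reference, the truncated ``log()`` method above would, as a minimal
sketch, simply delegate to the database's ``log()`` function (assuming the
backend provides one)::

    class CustomNumeric(Numeric):
        class comparator_factory(Numeric.Comparator):
            def log(self, other):
                # self.expr is the column expression this comparator wraps
                return func.log(self.expr, other)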
-
New features which have come from this immediately include
support for PostgreSQL's HSTORE type, as well as new
operations associated with PostgreSQL's ARRAY
not the same thing as the usual ``executemany()`` style of INSERT which
remains unchanged::
- users.insert().values([
- {"name": "some name"},
- {"name": "some other name"},
- {"name": "yet another name"},
- ])
+ users.insert().values(
+ [
+ {"name": "some name"},
+ {"name": "some other name"},
+ {"name": "yet another name"},
+ ]
+ )
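The ``executemany()`` style referenced above, by contrast, passes a sequence
of parameter dictionaries along with a plain ``insert()``; a sketch assuming
a ``conn`` Connection::

    conn.execute(
        users.insert(),
        [
            {"name": "some name"},
            {"name": "some other name"},
            {"name": "yet another name"},
        ],
    )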
.. seealso::
from sqlalchemy.types import String
from sqlalchemy import func, Table, Column, MetaData
+
class LowerString(String):
def bind_expression(self, bindvalue):
return func.lower(bindvalue)
def column_expression(self, col):
return func.lower(col)
+
metadata = MetaData()
- test_table = Table(
- 'test_table',
- metadata,
- Column('data', LowerString)
- )
+ test_table = Table("test_table", metadata, Column("data", LowerString))
Above, the ``LowerString`` type defines a SQL expression that will be emitted
whenever the ``test_table.c.data`` column is rendered in the columns
clause of a SELECT statement::
- >>> print(select([test_table]).where(test_table.c.data == 'HI'))
+ >>> print(select([test_table]).where(test_table.c.data == "HI"))
SELECT lower(test_table.data) AS data
FROM test_table
WHERE test_table.data = lower(:data_1)
signatures = relationship("Signature", lazy=False)
+
class Signature(Base):
__tablename__ = "signature"
id = Column(Integer, primary_key=True)
sig_count = column_property(
- select([func.count('*')]).\
- where(SnortEvent.signature == id).
- correlate_except(SnortEvent)
- )
+ select([func.count("*")])
+ .where(SnortEvent.signature == id)
+ .correlate_except(SnortEvent)
+ )
.. seealso::
from sqlalchemy.dialects.postgresql import HSTORE
- data = Table('data_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('hstore_data', HSTORE)
- )
-
- engine.execute(
- select([data.c.hstore_data['some_key']])
- ).scalar()
+ data = Table(
+ "data_table",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("hstore_data", HSTORE),
+ )
- engine.execute(
- select([data.c.hstore_data.matrix()])
- ).scalar()
+ engine.execute(select([data.c.hstore_data["some_key"]])).scalar()
+ engine.execute(select([data.c.hstore_data.matrix()])).scalar()
.. seealso::
The type also introduces new operators, using the new type-specific
operator framework. New operations include indexed access::
- result = conn.execute(
- select([mytable.c.arraycol[2]])
- )
+ result = conn.execute(select([mytable.c.arraycol[2]]))
slice access in SELECT::
- result = conn.execute(
- select([mytable.c.arraycol[2:4]])
- )
+ result = conn.execute(select([mytable.c.arraycol[2:4]]))
slice updates in UPDATE::
- conn.execute(
- mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})
- )
+ conn.execute(mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}))
freestanding array literals::
>>> from sqlalchemy.dialects import postgresql
- >>> conn.scalar(
- ... select([
- ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
- ... ])
- ... )
+ >>> conn.scalar(select([postgresql.array([1, 2]) + postgresql.array([3, 4, 5])]))
[1, 2, 3, 4, 5]
array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal::
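    # a sketch: the plain Python list on the right is coerced
    # into an array literal
    select([mytable.c.arraycol + [4, 5, 6]])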
::
- Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True))
- Column('sometimestamp', sqlite.DATETIME(
- storage_format=(
- "%(year)04d%(month)02d%(day)02d"
- "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
- ),
- regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})"
- )
- )
- Column('somedate', sqlite.DATE(
- storage_format="%(month)02d/%(day)02d/%(year)04d",
- regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
- )
- )
+ Column("sometimestamp", sqlite.DATETIME(truncate_microseconds=True))
+ Column(
+ "sometimestamp",
+ sqlite.DATETIME(
+ storage_format=(
+ "%(year)04d%(month)02d%(day)02d"
+ "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
+ ),
+ regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})",
+ ),
+ )
+ Column(
+ "somedate",
+ sqlite.DATE(
+ storage_format="%(month)02d/%(day)02d/%(year)04d",
+ regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
+ ),
+ )
Huge thanks to Nate Dub for the sprinting on this at Pycon 2012.
on all :class:`.String` types and will render on any backend, including
when features such as :meth:`_schema.MetaData.create_all` and :func:`.cast` are used::
- >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))])
+ >>> stmt = select([cast(sometable.c.somechar, String(20, collation="utf8"))])
>>> print(stmt)
SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1
FROM sometable
Base = declarative_base()
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(64))
+
class UserKeyword(Base):
- __tablename__ = 'user_keyword'
- user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
- keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True)
+ __tablename__ = "user_keyword"
+ user_id = Column(Integer, ForeignKey("user.id"), primary_key=True)
+ keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True)
- user = relationship(User,
- backref=backref("user_keywords",
- cascade="all, delete-orphan")
- )
+ user = relationship(
+ User, backref=backref("user_keywords", cascade="all, delete-orphan")
+ )
- keyword = relationship("Keyword",
- backref=backref("user_keywords",
- cascade="all, delete-orphan")
- )
+ keyword = relationship(
+ "Keyword", backref=backref("user_keywords", cascade="all, delete-orphan")
+ )
# uncomment this to enable the old behavior
# __mapper_args__ = {"legacy_is_orphan": True}
+
class Keyword(Base):
- __tablename__ = 'keyword'
+ __tablename__ = "keyword"
id = Column(Integer, primary_key=True)
- keyword = Column('keyword', String(64))
+ keyword = Column("keyword", String(64))
+
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
session.commit()
-
:ticket:`2655`
The after_attach event fires after the item is associated with the Session instead of before; before_attach added
@event.listens_for(Session, "before_attach")
def before_attach(session, instance):
- instance.some_necessary_attribute = session.query(Widget).\
- filter_by(instance.widget_name).\
- first()
+ instance.some_necessary_attribute = (
+ session.query(Widget).filter_by(instance.widget_name).first()
+ )
:ticket:`2464`
::
- subq = session.query(Entity.value).\
- filter(Entity.id==Parent.entity_id).\
- correlate(Parent).\
- as_scalar()
- session.query(Parent).filter(subq=="some value")
+ subq = (
+ session.query(Entity.value)
+ .filter(Entity.id == Parent.entity_id)
+ .correlate(Parent)
+ .as_scalar()
+ )
+ session.query(Parent).filter(subq == "some value")
This was the opposite behavior of a plain ``select()``
construct which would assume auto-correlation by default.
::
- subq = session.query(Entity.value).\
- filter(Entity.id==Parent.entity_id).\
- as_scalar()
- session.query(Parent).filter(subq=="some value")
+ subq = session.query(Entity.value).filter(Entity.id == Parent.entity_id).as_scalar()
+ session.query(Parent).filter(subq == "some value")
like in ``select()``, correlation can be disabled by calling
``query.correlate(None)`` or manually set by passing an
from sqlalchemy.sql import table, column, select
- t1 = table('t1', column('x'))
- t2 = table('t2', column('y'))
+ t1 = table("t1", column("x"))
+ t2 = table("t2", column("y"))
s = select([t1, t2]).correlate(t1)
print(s)
::
- scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo')
- select([sometable]).where(sometable.c.id==scalar_subq)
+ scalar_subq = select([someothertable.c.id]).where(someothertable.c.data == "foo")
+ select([sometable]).where(sometable.c.id == scalar_subq)
SQL Server doesn't allow an equality comparison to a scalar
SELECT, that is, "x = (SELECT something)". The MSSQL dialect
::
# before 0.8
- table1 = Table('t1', metadata,
- Column('col1', Integer, key='column_one')
- )
+ table1 = Table("t1", metadata, Column("col1", Integer, key="column_one"))
s = select([table1])
- s.c.column_one # would be accessible like this
- s.c.col1 # would raise AttributeError
+ s.c.column_one # would be accessible like this
+ s.c.col1 # would raise AttributeError
s = select([table1]).apply_labels()
- s.c.table1_column_one # would raise AttributeError
- s.c.table1_col1 # would be accessible like this
+ s.c.table1_column_one # would raise AttributeError
+ s.c.table1_col1 # would be accessible like this
In 0.8, :attr:`_schema.Column.key` is honored in both cases:
::
# with 0.8
- table1 = Table('t1', metadata,
- Column('col1', Integer, key='column_one')
- )
+ table1 = Table("t1", metadata, Column("col1", Integer, key="column_one"))
s = select([table1])
- s.c.column_one # works
- s.c.col1 # AttributeError
+ s.c.column_one # works
+ s.c.col1 # AttributeError
s = select([table1]).apply_labels()
- s.c.table1_column_one # works
- s.c.table1_col1 # AttributeError
+ s.c.table1_column_one # works
+ s.c.table1_col1 # AttributeError
All other behavior regarding "name" and "key" are the same,
including that the rendered SQL will still use the form
::
- t1 = table('t1', column('x'))
- t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z"
+ t1 = table("t1", column("x"))
+ t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z"
:ticket:`2415`
::
>>> row = result.fetchone()
- >>> row['foo'] == row['FOO'] == row['Foo']
+ >>> row["foo"] == row["FOO"] == row["Foo"]
True
This was for the benefit of a few dialects which in the
type maintained by that composite, rather than being broken out into individual
columns. Using the mapping setup at :ref:`mapper_composite`::
- >>> session.query(Vertex.start, Vertex.end).\
- ... filter(Vertex.start == Point(3, 4)).all()
+ >>> session.query(Vertex.start, Vertex.end).filter(Vertex.start == Point(3, 4)).all()
[(Point(x=3, y=4), Point(x=5, y=6))]
This change is backwards-incompatible with code that expects the individual attribute
accessor::
- >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\
- ... filter(Vertex.start == Point(3, 4)).all()
+ >>> session.query(Vertex.start.clauses, Vertex.end.clauses).filter(
+ ... Vertex.start == Point(3, 4)
+ ... ).all()
[(3, 4, 5, 6)]
.. seealso::
select_stmt = select([User]).where(User.id == 7).alias()
- q = session.query(User).\
- join(select_stmt, User.id == select_stmt.c.id).\
- filter(User.name == 'ed')
+ q = (
+ session.query(User)
+ .join(select_stmt, User.id == select_stmt.c.id)
+ .filter(User.name == "ed")
+ )
The above statement predictably renders SQL like the following::
JOIN, the documentation would lead us to believe we could use
:meth:`_query.Query.select_from` to do so::
- q = session.query(User).\
- select_from(select_stmt).\
- join(User, User.id == select_stmt.c.id).\
- filter(User.name == 'ed')
+ q = (
+ session.query(User)
+ .select_from(select_stmt)
+ .join(User, User.id == select_stmt.c.id)
+ .filter(User.name == "ed")
+ )
However, in version 0.8 and earlier, the above use of :meth:`_query.Query.select_from`
would apply the ``select_stmt`` to **replace** the ``User`` entity, as it
select_stmt = select([User]).where(User.id == 7)
user_from_stmt = aliased(User, select_stmt.alias())
- q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed')
+ q = session.query(user_from_stmt).filter(user_from_stmt.name == "ed")
So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces
the SQL we expect::
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
a = relationship("A", backref=backref("bs", viewonly=True))
+
e = create_engine("sqlite://")
Base.metadata.create_all(e)
Consider this mapping::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(Integer, ForeignKey('b.id'), primary_key=True)
+ b_id = Column(Integer, ForeignKey("b.id"), primary_key=True)
b = relationship("B")
b_value = association_proxy("b", "value")
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
value = Column(String)
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b = relationship("B", uselist=False)
bname = association_proxy("b", "name")
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
name = Column(String)
+
a1 = A()
# this is how m2o's always have worked
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String)
+
e = create_engine("sqlite://", echo=True)
Base.metadata.create_all(e)
sess = Session(e)
- a1 = A(data='a1')
+ a1 = A(data="a1")
sess.add(a1)
sess.commit() # a1 is now expired
assert inspect(a1).attrs.data.history == (None, None, None)
# in 0.8, this would fail to load the unloaded state.
- assert attributes.get_history(a1, 'data') == ((), ['a1',], ())
+ assert attributes.get_history(a1, "data") == (
+ (),
+ [
+ "a1",
+ ],
+ (),
+ )
# load_history() is now equivalent to get_history() with
# passive=PASSIVE_OFF ^ INIT_OK
- assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ())
+ assert inspect(a1).attrs.data.load_history() == (
+ (),
+ [
+ "a1",
+ ],
+ (),
+ )
:ticket:`2787`
from sqlalchemy.dialects.mysql import INTEGER
d = Date().with_variant(
- DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"),
- "sqlite"
- )
+ DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite"
+ )
- i = Integer().with_variant(
- INTEGER(display_width=5),
- "mysql"
- )
+ i = Integer().with_variant(INTEGER(display_width=5), "mysql")
:meth:`.TypeEngine.with_variant` isn't new, it was added in SQLAlchemy
0.7.2. So code that is running on the 0.8 series can be corrected to use
Previously, an expression like the following::
- print((column('x') == 'somevalue').collate("en_EN"))
+ print((column("x") == "somevalue").collate("en_EN"))
would produce an expression like this::
:meth:`.ColumnOperators.collate` operator is being applied to the right-hand
column, as follows::
- print(column('x') == literal('somevalue').collate("en_EN"))
+ print(column("x") == literal("somevalue").collate("en_EN"))
In 0.8, this produces::
generated::
>>> # 0.8
- >>> print(column('x').collate('en_EN').desc())
+ >>> print(column("x").collate("en_EN").desc())
(x COLLATE en_EN) DESC
>>> # 0.9
- >>> print(column('x').collate('en_EN').desc())
+ >>> print(column("x").collate("en_EN").desc())
x COLLATE en_EN DESC
:ticket:`2879`
signs within the enumerated values::
>>> from sqlalchemy.dialects import postgresql
- >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum")
+ >>> type = postgresql.ENUM("one", "two", "three's", name="myenum")
>>> from sqlalchemy.dialects.postgresql import base
>>> print(base.CreateEnumType(type).compile(dialect=postgresql.dialect()))
CREATE TYPE myenum AS ENUM ('one','two','three''s')
"""listen for before_insert"""
# ...
+
event.remove(MyClass, "before_insert", my_before_insert)
In the example above, the ``propagate=True`` flag is set. This
links in the path be spelled out as class bound attributes, since the
:meth:`.PropComparator.of_type` method needs to be called::
- session.query(Company).\
- options(
- subqueryload_all(
- Company.employees.of_type(Engineer),
- Engineer.machines
- )
- )
+ session.query(Company).options(
+ subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines)
+ )
**New Way**
query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords"))
-
The dotted style can still be taken advantage of, particularly in the case
of skipping over several path elements::
# undefer all Address columns
query(User).options(defaultload(User.addresses).undefer("*"))
-
:ticket:`1418`
stmt = stmt.alias()
stmt = select([addresses]).select_from(
- addresses.join(stmt), addresses.c.user_id == stmt.c.id)
+ addresses.join(stmt), addresses.c.user_id == stmt.c.id
+ )
# or into a cte():
stmt = stmt.cte("x")
stmt = select([addresses]).select_from(
- addresses.join(stmt), addresses.c.user_id == stmt.c.id)
+ addresses.join(stmt), addresses.c.user_id == stmt.c.id
+ )
:ticket:`2877`
where it will be used to render an ``INSERT .. SELECT`` construct::
>>> from sqlalchemy.sql import table, column
- >>> t1 = table('t1', column('a'), column('b'))
- >>> t2 = table('t2', column('x'), column('y'))
- >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5)))
+ >>> t1 = table("t1", column("a"), column("b"))
+ >>> t2 = table("t2", column("x"), column("y"))
+ >>> print(t1.insert().from_select(["a", "b"], t2.select().where(t2.c.y == 5)))
INSERT INTO t1 (a, b) SELECT t2.x, t2.y
FROM t2
WHERE t2.y = :y_1
and :class:`_query.Query` objects::
s = Session()
- q = s.query(User.id, User.name).filter_by(name='ed')
+ q = s.query(User.id, User.name).filter_by(name="ed")
ins = insert(Address).from_select((Address.id, Address.email_address), q)
rendering::
from sqlalchemy.dialects.mysql import DOUBLE
import decimal
- data = Table('data', metadata,
- Column('double_value',
- mysql.DOUBLE(decimal_return_scale=12, asdecimal=True))
+ data = Table(
+ "data",
+ metadata,
+ Column("double_value", mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)),
)
conn.execute(
# much precision for DOUBLE
assert result == decimal.Decimal("45.768392065789")
-
:ticket:`2867`
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", backref="a")
print("A.bs validator")
return item
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
@validates("a", include_backrefs=False)
def validate_a(self, key, item):
print("B.a validator")
return item
+
a1 = A()
a1.bs.append(B()) # prints only "A.bs validator"
-
:ticket:`1535`
employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True)
- session.query(Company).join(
- Company.employees.of_type(employee_alias)
- ).filter(
- or_(
- Engineer.primary_language == 'python',
- Manager.manager_name == 'dilbert'
- )
- )
+ session.query(Company).join(Company.employees.of_type(employee_alias)).filter(
+ or_(Engineer.primary_language == "python", Manager.manager_name == "dilbert")
+ )
Generates (everywhere except SQLite)::
Normally, a joined eager load chain like the following::
- query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True))
+ query(User).options(
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)
+ )
Would not produce an inner join; because of the LEFT OUTER JOIN from user->order,
joined eager loading could not use an INNER join from order->items without changing
Since we missed the boat on that, to avoid further regressions we've added the above
functionality by specifying the string ``"nested"`` to :paramref:`_orm.joinedload.innerjoin`::
- query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested"))
+ query(User).options(
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")
+ )
This feature is new in 0.9.4.
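With ``innerjoin="nested"``, the join to ``items`` is rendered as an INNER
JOIN nested to the right of the ``orders`` join, so the outer join from
``users`` remains optional; roughly (a sketch)::

    SELECT ...
    FROM users
    LEFT OUTER JOIN (orders JOIN items ON <onclause>) ON users.id = orders.user_id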
previous collection::
class Parent(Base):
- __tablename__ = 'parent'
+ __tablename__ = "parent"
id = Column(Integer, primary_key=True)
children = relationship("Child", backref="parent")
+
class Child(Base):
- __tablename__ = 'child'
+ __tablename__ = "child"
id = Column(Integer, primary_key=True)
- parent_id = Column(ForeignKey('parent.id'))
+ parent_id = Column(ForeignKey("parent.id"))
+
p1 = Parent()
p2 = Parent()
from sqlalchemy import Table, Boolean, Integer, Column, MetaData
- t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer))
+ t1 = Table("t", MetaData(), Column("x", Boolean()), Column("y", Integer))
A select construct will now render the boolean column as a binary expression
on backends that don't feature ``true``/``false`` constant behavior::
"short circuit" behavior, that is truncating a rendered expression, when a
:func:`.true` or :func:`.false` constant is present::
- >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(
- ... dialect=postgresql.dialect()))
+ >>> print(
+ ... select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=postgresql.dialect())
+ ... )
SELECT t.x, t.y FROM t WHERE false
:func:`.true` can be used as the base to build up an expression::
The boolean constants :func:`.true` and :func:`.false` themselves render as
``0 = 1`` and ``1 = 1`` for a backend with no boolean constants::
- >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(
- ... dialect=mysql.dialect()))
+ >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=mysql.dialect()))
SELECT t.x, t.y FROM t WHERE 0 = 1
Interpretation of ``None``, while not particularly valid SQL, is at least
from sqlalchemy.sql import table, column, select, func
- t = table('t', column('c1'), column('c2'))
+ t = table("t", column("c1"), column("c2"))
expr = (func.foo(t.c.c1) + t.c.c2).label("expr")
stmt = select([expr]).order_by(expr)
an ``__lt__()`` method has been added::
users.insert().execute(
- dict(user_id=1, user_name='foo'),
- dict(user_id=2, user_name='bar'),
- dict(user_id=3, user_name='def'),
- )
+ dict(user_id=1, user_name="foo"),
+ dict(user_id=2, user_name="bar"),
+ dict(user_id=3, user_name="def"),
+ )
rows = users.select().order_by(users.c.user_name).execute().fetchall()
- eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
+ eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")])
- eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
+ eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")])
:ticket:`2848`
the statement is executed, which we can see by examining the ``binds`` dictionary::
>>> compiled = stmt.compile()
- >>> compiled.binds['some_col'].type
+ >>> compiled.binds["some_col"].type
String
The feature allows custom types to take their expected effect within INSERT/UPDATE
>>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey
>>> metadata = MetaData()
- >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id')))
+ >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id")))
>>> t2.c.t1id.type
NullType()
- >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True))
+ >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True))
>>> t2.c.t1id.type
Integer()
>>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint
>>> metadata = MetaData()
- >>> t2 = Table('t2', metadata,
- ... Column('t1a'), Column('t1b'),
- ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b']))
+ >>> t2 = Table(
+ ... "t2",
+ ... metadata,
+ ... Column("t1a"),
+ ... Column("t1b"),
+ ... ForeignKeyConstraint(["t1a", "t1b"], ["t1.a", "t1.b"]),
+ ... )
>>> t2.c.t1a.type
NullType()
>>> t2.c.t1b.type
NullType()
- >>> t1 = Table('t1', metadata,
- ... Column('a', Integer, primary_key=True),
- ... Column('b', Integer, primary_key=True))
+ >>> t1 = Table(
+ ... "t1",
+ ... metadata,
+ ... Column("a", Integer, primary_key=True),
+ ... Column("b", Integer, primary_key=True),
+ ... )
>>> t2.c.t1a.type
Integer()
>>> t2.c.t1b.type
>>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey
>>> metadata = MetaData()
- >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id')))
- >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id')))
+ >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id")))
+ >>> t3 = Table("t3", metadata, Column("t2t1id", ForeignKey("t2.t1id")))
>>> t2.c.t1id.type
NullType()
>>> t3.c.t2t1id.type
NullType()
- >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True))
+ >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True))
>>> t2.c.t1id.type
Integer()
>>> t3.c.t2t1id.type
bakery = baked.bakery()
+
def search_for_user(session, username, email=None):
baked_query = bakery(lambda session: session.query(User))
- baked_query += lambda q: q.filter(User.name == bindparam('username'))
+ baked_query += lambda q: q.filter(User.name == bindparam("username"))
baked_query += lambda q: q.order_by(User.id)
if email:
- baked_query += lambda q: q.filter(User.email == bindparam('email'))
+ baked_query += lambda q: q.filter(User.email == bindparam("email"))
result = baked_query(session).params(username=username, email=email).all()
@declared_attr
def foobar_prop(cls):
- return column_property('foobar: ' + cls.foobar)
+ return column_property("foobar: " + cls.foobar)
+
class SomeClass(HasFooBar, Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id = Column(Integer, primary_key=True)
Above, ``SomeClass.foobar_prop`` will be invoked against ``SomeClass``,
@declared_attr
def foobar_prop(cls):
- return column_property('foobar: ' + cls.foobar)
+ return column_property("foobar: " + cls.foobar)
+
class SomeClass(HasFooBar, Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id = Column(Integer, primary_key=True)
Previously, ``SomeClass`` would be mapped with one particular copy of
@declared_attr.cascading
def id(cls):
if has_inherited_table(cls):
- return Column(ForeignKey('myclass.id'), primary_key=True)
+ return Column(ForeignKey("myclass.id"), primary_key=True)
else:
return Column(Integer, primary_key=True)
+
class MyClass(HasIdMixin, Base):
- __tablename__ = 'myclass'
+ __tablename__ = "myclass"
# ...
+
class MySubClass(MyClass):
- ""
+ """"""
+
# ...
.. seealso::
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
- from sqlalchemy.ext.declarative import (declarative_base, declared_attr,
- AbstractConcreteBase)
+ from sqlalchemy.ext.declarative import (
+ declarative_base,
+ declared_attr,
+ AbstractConcreteBase,
+ )
Base = declarative_base()
+
class Something(Base):
- __tablename__ = u'something'
+ __tablename__ = "something"
id = Column(Integer, primary_key=True)
class Concrete(Abstract):
- __tablename__ = u'cca'
- __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True}
-
+ __tablename__ = "cca"
+ __mapper_args__ = {"polymorphic_identity": "cca", "concrete": True}
The above mapping will set up a table ``cca`` with both an ``id`` and
a ``something_id`` column, and ``Concrete`` will also have a relationship
Base = declarative_base()
+
class Foo(Base):
__table__ = Table(
- 'foo', Base.metadata,
- Column('id', Integer, primary_key=True),
- Column('a', Integer(), nullable=False),
- Column('b', Integer(), nullable=False),
- Column('c', Integer(), nullable=False),
+ "foo",
+ Base.metadata,
+ Column("id", Integer, primary_key=True),
+ Column("a", Integer(), nullable=False),
+ Column("b", Integer(), nullable=False),
+ Column("c", Integer(), nullable=False),
)
- engine = create_engine(
- 'mysql+mysqldb://scott:tiger@localhost/test', echo=True)
+
+ engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True)
sess = Session(engine)
* Binding to a Mixin or Abstract Class::
class MyClass(SomeMixin, Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
# ...
- session = Session(binds={SomeMixin: some_engine})
+ session = Session(binds={SomeMixin: some_engine})
* Binding to inherited concrete subclasses individually based on table::
class BaseClass(Base):
- __tablename__ = 'base'
+ __tablename__ = "base"
# ...
+
class ConcreteSubClass(BaseClass):
- __tablename__ = 'concrete'
+ __tablename__ = "concrete"
# ...
- __mapper_args__ = {'concrete': True}
-
+ __mapper_args__ = {"concrete": True}
- session = Session(binds={
- base_table: some_engine,
- concrete_table: some_other_engine
- })
+ session = Session(binds={base_table: some_engine, concrete_table: some_other_engine})
:ticket:`3035`
statement as well as for the SELECT used by the "fetch" strategy::
session.query(User).filter(User.id == 15).update(
- {"name": "foob"}, synchronize_session='fetch')
+ {"name": "foob"}, synchronize_session="fetch"
+ )
- session.query(User).filter(User.id == 15).delete(
- synchronize_session='fetch')
+ session.query(User).filter(User.id == 15).delete(synchronize_session="fetch")
* Queries against individual columns::
return self.value + 5
- inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar'
+ inspect(SomeObject).all_orm_descriptors.some_prop.info["foo"] = "bar"
It is also available as a constructor argument for all :class:`.SchemaItem`
objects (e.g. :class:`_schema.ForeignKey`, :class:`.UniqueConstraint` etc.) as well
Given a mapping like the following::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
- A.b = column_property(
- select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)
- )
+ A.b = column_property(select([func.max(B.id)]).where(B.a_id == A.id).correlate(A))
A simple scenario that included "A.b" twice would fail to render
correctly::
to order by label, for example if the mapping were "polymorphic"::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
type = Column(String)
- __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'}
+ __mapper_args__ = {"polymorphic_on": type, "with_polymorphic": "*"}
The order_by would fail to use the label, as it would be anonymized due
to the polymorphic loading::
this is used to allow a bound parameter to be passed, which can be substituted
with a value later::
- sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset'))
+ sel = select([table]).limit(bindparam("mylimit")).offset(bindparam("myoffset"))
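
The values may then be supplied at execution time; a minimal sketch,
assuming a Core connection ``conn``::

    result = conn.execute(sel, {"mylimit": 10, "myoffset": 5})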
Dialects that don't support non-integer LIMIT or OFFSET expressions may continue
not to support this behavior; third-party dialects may also need modification
The ``%(column_0_name)s`` will derive from the first column found in the
expression of a :class:`.CheckConstraint`::
- metadata = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- foo = Table('foo', metadata,
- Column('value', Integer),
+ foo = Table(
+ "foo",
+ metadata,
+ Column("value", Integer),
)
CheckConstraint(foo.c.value > 5)
m = MetaData()
- t = Table('t', m,
- Column('a', Integer),
- Column('b', Integer)
- )
+ t = Table("t", m, Column("a", Integer), Column("b", Integer))
uq = UniqueConstraint(t.c.a, t.c.b) # will auto-attach to Table
m = MetaData()
- a = Column('a', Integer)
- b = Column('b', Integer)
+ a = Column("a", Integer)
+ b = Column("b", Integer)
uq = UniqueConstraint(a, b)
- t = Table('t', m, a, b)
+ t = Table("t", m, a, b)
assert uq in t.constraints # constraint auto-attached
m = MetaData()
- a = Column('a', Integer)
- b = Column('b', Integer)
+ a = Column("a", Integer)
+ b = Column("b", Integer)
- uq = UniqueConstraint(a, 'b')
+ uq = UniqueConstraint(a, "b")
- t = Table('t', m, a, b)
+ t = Table("t", m, a, b)
# constraint *not* auto-attached, as we do not have tracking
# to locate when a name 'b' becomes available on the table
m = MetaData()
- a = Column('a', Integer)
- b = Column('b', Integer)
+ a = Column("a", Integer)
+ b = Column("b", Integer)
- t = Table('t', m, a, b)
+ t = Table("t", m, a, b)
- uq = UniqueConstraint(a, 'b')
+ uq = UniqueConstraint(a, "b")
# constraint auto-attached normally as in older versions
assert uq in t.constraints
-
:ticket:`3341`
:ticket:`3411`
m = MetaData()
t = Table(
- 't', m,
- Column('x', Integer),
- Column('y', Integer, default=func.somefunction()))
+ "t", m, Column("x", Integer), Column("y", Integer, default=func.somefunction())
+ )
stmt = select([t.c.x])
- print(t.insert().from_select(['x'], stmt))
+ print(t.insert().from_select(["x"], stmt))
Will render::
metadata = MetaData()
- tbl = Table("derp", metadata,
- Column("arr", ARRAY(Text),
- server_default=array(["foo", "bar", "baz"])),
+ tbl = Table(
+ "derp",
+ metadata,
+ Column("arr", ARRAY(Text), server_default=array(["foo", "bar", "baz"])),
)
print(CreateTable(tbl).compile(dialect=postgresql.dialect()))
warnings.filterwarnings("once")
for i in range(1000):
- e.execute(select([cast(
- ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)]))
+ e.execute(
+ select([cast(("foo_%d" % random.randint(0, 1000000)).encode("ascii"), Unicode)])
+ )
The format of the warning here is::
The string names are now resolved as attribute names in earnest::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
- name = Column('user_name', String(50))
+ name = Column("user_name", String(50))
Above, the column ``user_name`` is mapped as ``name``. Previously,
a call to :meth:`_query.Query.update` that was passed strings had to be
written as follows::
- session.query(User).update({'user_name': 'moonbeam'})
+ session.query(User).update({"user_name": "moonbeam"})
The given string is now resolved against the entity::
- session.query(User).update({'name': 'moonbeam'})
+ session.query(User).update({"name": "moonbeam"})
It is typically preferable to use the attribute directly, to avoid any
ambiguity::
- session.query(User).update({User.name: 'moonbeam'})
+ session.query(User).update({User.name: "moonbeam"})
The change also indicates that synonyms and hybrid attributes can be referred
to by string name as well::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
- name = Column('user_name', String(50))
+ name = Column("user_name", String(50))
@hybrid_property
def fullname(self):
return self.name
- session.query(User).update({'fullname': 'moonbeam'})
+
+ session.query(User).update({"fullname": "moonbeam"})
:ticket:`3228`
Given a mapping::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
a = relationship("A")
Given ``A``, with primary key of 7, but which we changed to be 10
Given a mapping::
class A(Base):
- __tablename__ = 'table_a'
+ __tablename__ = "table_a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'table_b'
+ __tablename__ = "table_b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('table_a.id'))
+ a_id = Column(ForeignKey("table_a.id"))
a = relationship(A)
In 1.0, the relationship-bound attribute takes precedence over the FK-bound
session.flush()
b1 = B()
- b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0
+ b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0
b2 = B()
b2.a = None # we expect a_id to be None; takes precedence only in 1.0
When this error is raised, the :func:`.lazyload` option can be sent with
an asterisk::
- q = sess.query(Object).options(lazyload('*')).yield_per(100)
+ q = sess.query(Object).options(lazyload("*")).yield_per(100)
or use :meth:`_query.Query.enable_eagerloads`::
The :func:`.lazyload` option has the advantage that additional many-to-one
joined loader options can still be used::
- q = sess.query(Object).options(
- lazyload('*'), joinedload("some_manytoone")).yield_per(100)
+ q = (
+ sess.query(Object)
+ .options(lazyload("*"), joinedload("some_manytoone"))
+ .yield_per(100)
+ )
.. _bug_3233:
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
A query that joins to ``A.bs`` twice::
The query deduplicates the redundant ``A.bs`` because it is attempting
to support a case like the following::
- s.query(A).join(A.bs).\
- filter(B.foo == 'bar').\
- reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat')
+ s.query(A).join(A.bs).filter(B.foo == "bar").reset_joinpoint().join(A.bs, B.cs).filter(
+ C.bar == "bat"
+ )
That is, the ``A.bs`` is part of a "path". As part of :ticket:`3367`,
arriving at the same endpoint twice without it being part of a
Base = declarative_base()
+
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
type = Column(String)
- __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'}
+ __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"}
class ASub1(A):
- __mapper_args__ = {'polymorphic_identity': 'asub1'}
+ __mapper_args__ = {"polymorphic_identity": "asub1"}
class ASub2(A):
- __mapper_args__ = {'polymorphic_identity': 'asub2'}
+ __mapper_args__ = {"polymorphic_identity": "asub2"}
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(Integer, ForeignKey("a.id"))
- a = relationship("A", primaryjoin="B.a_id == A.id", backref='b')
+ a = relationship("A", primaryjoin="B.a_id == A.id", backref="b")
+
s = Session()
from sqlalchemy.orm import Bundle
+
class DictBundle(Bundle):
def create_row_processor(self, query, procs, labels):
"""Override create_row_processor to return values as dictionaries"""
+
def proc(row, result):
- return dict(
- zip(labels, (proc(row, result) for proc in procs))
- )
+ return dict(zip(labels, (proc(row, result) for proc in procs)))
+
return proc
The unused ``result`` member is now removed::
from sqlalchemy.orm import Bundle
+
class DictBundle(Bundle):
def create_row_processor(self, query, procs, labels):
"""Override create_row_processor to return values as dictionaries"""
+
def proc(row):
- return dict(
- zip(labels, (proc(row) for proc in procs))
- )
+ return dict(zip(labels, (proc(row) for proc in procs)))
+
return proc
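
A brief sketch of using such a bundle in a query, assuming a mapped
``User`` class with ``id`` and ``name`` columns; each result row then
carries a plain dictionary::

    bn = DictBundle("user_data", User.id, User.name)

    for row in session.query(bn):
        print(row.user_data["id"], row.user_data["name"])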
.. seealso::
when using ``innerjoin=True``::
query(User).options(
- joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True))
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)
+ )
With the new default, this will render the FROM clause in the form::
To get the older behavior, use ``innerjoin="unnested"``::
query(User).options(
- joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested"))
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested")
+ )
This will avoid right-nested joins and chain the joins together using all
OUTER joins despite the innerjoin directive::
Given a joined eager load like the following::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b = relationship("B", uselist=False)
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
+
s = Session()
print(s.query(A).options(joinedload(A.b)).limit(5))
Given a single-table inheritance mapping, such as::
class Widget(Base):
- __table__ = 'widget_table'
+ __tablename__ = "widget_table"
+
class FooWidget(Widget):
pass
mapping as::
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
id = Column(Integer, primary_key=True)
type = Column(String)
- related_id = Column(ForeignKey('related.id'))
+ related_id = Column(ForeignKey("related.id"))
related = relationship("Related", backref="widget")
- __mapper_args__ = {'polymorphic_on': type}
+ __mapper_args__ = {"polymorphic_on": type}
class FooWidget(Widget):
- __mapper_args__ = {'polymorphic_identity': 'foo'}
+ __mapper_args__ = {"polymorphic_identity": "foo"}
class Related(Base):
- __tablename__ = 'related'
+ __tablename__ = "related"
id = Column(Integer, primary_key=True)
It's been the behavior for quite some time that a JOIN on the relationship
# This is a normal Core expression with a string argument -
# we aren't talking about this!!
- stmt = select([sometable]).where(sometable.c.somecolumn == 'value')
+ stmt = select([sometable]).where(sometable.c.somecolumn == "value")
The Core tutorial has long featured an example of the use of this technique,
using a :func:`_expression.select` construct where virtually all components of it
should be used::
import warnings
- warnings.simplefilter("error") # all warnings raise an exception
+
+ warnings.simplefilter("error") # all warnings raise an exception
Given the above warnings, our statement works just fine, but
to get rid of the warnings we would rewrite our statement as follows::
from sqlalchemy import select, text
- stmt = select([
- text("a"),
- text("b")
- ]).where(text("a = b")).select_from(text("sometable"))
+
+ stmt = (
+ select([text("a"), text("b")]).where(text("a = b")).select_from(text("sometable"))
+ )
and as the warnings suggest, we can give our statement more specificity
about the text if we use :func:`_expression.column` and :func:`.table`::
from sqlalchemy import select, text, column, table
- stmt = select([column("a"), column("b")]).\
- where(text("a = b")).select_from(table("sometable"))
+ stmt = (
+ select([column("a"), column("b")])
+ .where(text("a = b"))
+ .select_from(table("sometable"))
+ )
Note also that :func:`.table` and :func:`_expression.column` can now
be imported from "sqlalchemy" without the "sql" part.
:func:`_expression.select` or :class:`_query.Query` that refers to some column name or named
label, we might want to GROUP BY and/or ORDER BY known columns or labels::
- stmt = select([
- user.c.name,
- func.count(user.c.id).label("id_count")
- ]).group_by("name").order_by("id_count")
+ stmt = (
+ select([user.c.name, func.count(user.c.id).label("id_count")])
+ .group_by("name")
+ .order_by("id_count")
+ )
In the above statement we expect to see "ORDER BY id_count", as opposed to a
re-statement of the function. The string argument given is actively
However, if we refer to a name that cannot be located, then we get
the warning again, as below::
- stmt = select([
- user.c.name,
- func.count(user.c.id).label("id_count")
- ]).order_by("some_label")
+ stmt = select([user.c.name, func.count(user.c.id).label("id_count")]).order_by(
+ "some_label"
+ )
The output does what we say, but again it warns us::
counter = itertools.count(1)
t = Table(
- 'my_table', metadata,
- Column('id', Integer, default=lambda: next(counter)),
- Column('data', String)
+ "my_table",
+ metadata,
+ Column("id", Integer, default=lambda: next(counter)),
+ Column("data", String),
)
- conn.execute(t.insert().values([
- {"data": "d1"},
- {"data": "d2"},
- {"data": "d3"},
- ]))
+ conn.execute(
+ t.insert().values(
+ [
+ {"data": "d1"},
+ {"data": "d2"},
+ {"data": "d3"},
+ ]
+ )
+ )
The above example will invoke ``next(counter)`` for each row individually
as would be expected::
an exception is raised::
t = Table(
- 'my_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', String, server_default='some default')
+ "my_table",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("data", String, server_default="some default"),
)
- conn.execute(t.insert().values([
- {"data": "d1"},
- {"data": "d2"},
- {},
- ]))
+ conn.execute(
+ t.insert().values(
+ [
+ {"data": "d1"},
+ {"data": "d2"},
+ {},
+ ]
+ )
+ )
will raise::
A :class:`_schema.Table` can be set up for reflection by passing
:paramref:`_schema.Table.autoload_with` alone::
- my_table = Table('my_table', metadata, autoload_with=some_engine)
+ my_table = Table("my_table", metadata, autoload_with=some_engine)
:ticket:`3027`
associated with a :class:`_schema.MetaData` object will be created *and* dropped
corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`::
- table = Table('sometable', metadata,
- Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
+ table = Table(
+ "sometable", metadata, Column("some_enum", ENUM("a", "b", "c", name="myenum"))
)
table.create(engine) # will emit CREATE TYPE and CREATE TABLE
the exception of :meth:`_schema.Table.create` called with the ``checkfirst=True``
flag::
- my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
+ my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata)
- table = Table('sometable', metadata,
- Column('some_enum', my_enum)
- )
+ table = Table("sometable", metadata, Column("some_enum", my_enum))
# will fail: ENUM 'myenum' does not exist
table.create(engine)
table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE
- metadata.drop_all(engine) # will emit DROP TYPE
-
- metadata.create_all(engine) # will emit CREATE TYPE
+ metadata.drop_all(engine) # will emit DROP TYPE
+ metadata.create_all(engine) # will emit CREATE TYPE
:ticket:`3319`
metadata = MetaData()
user_tmp = Table(
- "user_tmp", metadata,
+ "user_tmp",
+ metadata,
Column("id", INT, primary_key=True),
- Column('name', VARCHAR(50)),
- prefixes=['TEMPORARY']
+ Column("name", VARCHAR(50)),
+ prefixes=["TEMPORARY"],
)
- e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug')
+ e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug")
with e.begin() as conn:
user_tmp.create(conn, checkfirst=True)
metadata = MetaData()
user_tmp = Table(
- "user_tmp", metadata,
+ "user_tmp",
+ metadata,
Column("id", INT, primary_key=True),
- Column('name', VARCHAR(50)),
- prefixes=['TEMPORARY']
+ Column("name", VARCHAR(50)),
+ prefixes=["TEMPORARY"],
)
- e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug')
+ e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug")
with e.begin() as conn:
user_tmp.create(conn, checkfirst=True)
m2 = MetaData()
user = Table(
- "user_tmp", m2,
+ "user_tmp",
+ m2,
Column("id", INT, primary_key=True),
- Column('name', VARCHAR(50)),
+ Column("name", VARCHAR(50)),
)
# in 0.9, *will create* the new table, overwriting the old one.
on MySQL::
>>> connection.execute(
- ... select([
- ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'),
- ... matchtable.c.title.match('Dive Python').label('python'),
- ... matchtable.c.title
- ... ]).order_by(matchtable.c.id)
+ ... select(
+ ... [
+ ... matchtable.c.title.match("Agile Ruby Programming").label("ruby"),
+ ... matchtable.c.title.match("Dive Python").label("python"),
+ ... matchtable.c.title,
+ ... ]
+ ... ).order_by(matchtable.c.id)
... )
[
(2.0, 0.0, 'Agile Web Development with Ruby On Rails'),
with an explicit hostname, now requires a driver name - SQLAlchemy will no
longer attempt to guess a default::
- engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
+ engine = create_engine(
+ "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0"
+ )
SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on
Windows, and SQLAlchemy cannot be tasked with guessing the best driver
CTE support has been fixed up for Oracle, and there is also a new feature
:meth:`_expression.CTE.with_suffixes` that can assist with Oracle's special directives::
- included_parts = select([
- part.c.sub_part, part.c.part, part.c.quantity
- ]).where(part.c.part == "p1").\
- cte(name="included_parts", recursive=True).\
- suffix_with(
+ included_parts = (
+ select([part.c.sub_part, part.c.part, part.c.quantity])
+ .where(part.c.part == "p1")
+ .cte(name="included_parts", recursive=True)
+ .suffix_with(
"search depth first by part set ord1",
- "cycle part set y_cycle to 1 default 0", dialect='oracle')
+ "cycle part set y_cycle to 1 default 0",
+ dialect="oracle",
+ )
+ )
:ticket:`3220`
examples will return duplicate rows due to the joined eager load unless
explicit typing is applied::
- result = session.query(
- func.substr(A.some_thing, 0, 4), A
- ).options(joinedload(A.bs)).all()
+ result = (
+ session.query(func.substr(A.some_thing, 0, 4), A).options(joinedload(A.bs)).all()
+ )
- users = session.query(
- func.date(
- User.date_created, 'start of month'
- ).label('month'),
- User,
- ).options(joinedload(User.orders)).all()
+ users = (
+ session.query(
+ func.date(User.date_created, "start of month").label("month"),
+ User,
+ )
+ .options(joinedload(User.orders))
+ .all()
+ )
The above examples, in order to retain deduping, should be specified as::
- result = session.query(
- func.substr(A.some_thing, 0, 4, type_=String), A
- ).options(joinedload(A.bs)).all()
+ result = (
+ session.query(func.substr(A.some_thing, 0, 4, type_=String), A)
+ .options(joinedload(A.bs))
+ .all()
+ )
- users = session.query(
- func.date(
- User.date_created, 'start of month', type_=DateTime
- ).label('month'),
- User,
- ).options(joinedload(User.orders)).all()
+ users = (
+ session.query(
+ func.date(User.date_created, "start of month", type_=DateTime).label("month"),
+ User,
+ )
+ .options(joinedload(User.orders))
+ .all()
+ )
Additionally, the treatment of a so-called "unhashable" type is slightly
different than it has been in previous releases; internally we are using
>>> some_user = User()
>>> q = s.query(User).filter(User.name == some_user)
- ...
sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value
The exception is now immediate when the comparison is made between
or JSON field::
class Person(Base):
- __tablename__ = 'person'
+ __tablename__ = "person"
id = Column(Integer, primary_key=True)
data = Column(JSON)
- name = index_property('data', 'name')
+ name = index_property("data", "name")
Above, the ``name`` attribute will read/write the field ``"name"``
from the JSON column ``data``, after initializing it to an
empty dictionary::
- >>> person = Person(name='foobar')
+ >>> person = Person(name="foobar")
>>> person.name
foobar
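
Assigning to the attribute writes through to the underlying JSON
dictionary; a short sketch continuing the above::

    >>> person.name = "updated"
    >>> person.data
    {'name': 'updated'}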
query is against a subquery expression such as an exists::
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
id = Column(Integer, primary_key=True)
type = Column(String)
data = Column(String)
- __mapper_args__ = {'polymorphic_on': type}
+ __mapper_args__ = {"polymorphic_on": type}
class FooWidget(Widget):
- __mapper_args__ = {'polymorphic_identity': 'foo'}
+ __mapper_args__ = {"polymorphic_identity": "foo"}
- q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists()
+
+ q = session.query(FooWidget).filter(FooWidget.data == "bar").exists()
session.query(q).all()
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
e = create_engine("sqlite://", echo=True)
Base.metadata.create_all(e)
class A(Base):
__tablename__ = "a"
- id = Column('id', Integer, primary_key=True)
+ id = Column("id", Integer, primary_key=True)
type = Column(String)
__mapper_args__ = {
- 'polymorphic_on': type,
- 'polymorphic_identity': 'a',
- 'passive_deletes': True
+ "polymorphic_on": type,
+ "polymorphic_identity": "a",
+ "passive_deletes": True,
}
class B(A):
- __tablename__ = 'b'
- b_table_id = Column('b_table_id', Integer, primary_key=True)
- bid = Column('bid', Integer, ForeignKey('a.id', ondelete="CASCADE"))
- data = Column('data', String)
+ __tablename__ = "b"
+ b_table_id = Column("b_table_id", Integer, primary_key=True)
+ bid = Column("bid", Integer, ForeignKey("a.id", ondelete="CASCADE"))
+ data = Column("data", String)
- __mapper_args__ = {
- 'polymorphic_identity': 'b'
- }
+ __mapper_args__ = {"polymorphic_identity": "b"}
With the above mapping, the :paramref:`.orm.mapper.passive_deletes` option
is configured on the base mapper; it takes effect for all non-base mappers
The following mapping has always been possible without issue::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b = relationship("B", foreign_keys="B.a_id", backref="a")
+
class A1(A):
- __tablename__ = 'a1'
+ __tablename__ = "a1"
id = Column(Integer, primary_key=True)
b = relationship("B", foreign_keys="B.a1_id", backref="a1")
- __mapper_args__ = {'concrete': True}
+ __mapper_args__ = {"concrete": True}
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- a1_id = Column(ForeignKey('a1.id'))
+ a_id = Column(ForeignKey("a.id"))
+ a1_id = Column(ForeignKey("a1.id"))
Above, even though class ``A`` and class ``A1`` have a relationship
named ``b``, no conflict warning or error occurs because class ``A1`` is
would occur::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
class A1(A):
- __tablename__ = 'a1'
+ __tablename__ = "a1"
id = Column(Integer, primary_key=True)
- __mapper_args__ = {'concrete': True}
+ __mapper_args__ = {"concrete": True}
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- a1_id = Column(ForeignKey('a1.id'))
+ a_id = Column(ForeignKey("a.id"))
+ a1_id = Column(ForeignKey("a1.id"))
a = relationship("A", backref="b")
a1 = relationship("A1", backref="b")
An example is as follows::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
class ASub(A):
- __tablename__ = 'a_sub'
- id = Column(Integer, ForeignKey('a.id'), primary_key=True)
+ __tablename__ = "a_sub"
+ id = Column(Integer, ForeignKey("a.id"), primary_key=True)
bs = relationship("B")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
-
+ a_id = Column(ForeignKey("a.id"))
This warning dates back to the 0.4 series in 2007 and is based on a version of
the unit of work code that has since been entirely rewritten. Currently, there
present in the original docstring::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
name = Column(String)
expression. That is, accessing ``A.some_name.info`` now returns the same
dictionary that you'd get from ``inspect(A).all_orm_descriptors['some_name'].info``::
- >>> A.some_name.info['foo'] = 'bar'
+ >>> A.some_name.info["foo"] = "bar"
>>> from sqlalchemy import inspect
- >>> inspect(A).all_orm_descriptors['some_name'].info
+ >>> inspect(A).all_orm_descriptors["some_name"].info
{'foo': 'bar'}
Note that this ``.info`` dictionary is **separate** from that of a mapped attribute
Given::
- u1 = User(id=7, name='x')
+ u1 = User(id=7, name="x")
u1.orders = [
- Order(description='o1', address=Address(id=1, email_address='a')),
- Order(description='o2', address=Address(id=1, email_address='b')),
- Order(description='o3', address=Address(id=1, email_address='c'))
+ Order(description="o1", address=Address(id=1, email_address="a")),
+ Order(description="o2", address=Address(id=1, email_address="b")),
+ Order(description="o3", address=Address(id=1, email_address="c")),
]
sess = Session()
deep use case that's hard to reproduce, but the general idea is as follows::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
- c_id = Column(ForeignKey('c.id'))
+ b_id = Column(ForeignKey("b.id"))
+ c_id = Column(ForeignKey("c.id"))
b = relationship("B")
c = relationship("C")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- c_id = Column(ForeignKey('c.id'))
+ c_id = Column(ForeignKey("c.id"))
c = relationship("C")
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
- d_id = Column(ForeignKey('d.id'))
+ d_id = Column(ForeignKey("d.id"))
d = relationship("D")
class D(Base):
- __tablename__ = 'd'
+ __tablename__ = "d"
id = Column(Integer, primary_key=True)
q = s.query(A)
q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d)
- q = q.options(contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d))
+ q = q.options(
+ contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d)
+ )
q = q.join(c_alias_2, A.c)
q = q.options(contains_eager(A.c, alias=c_alias_2))
engine = create_engine("postgresql+psycopg2://")
+
@event.listens_for(engine, "handle_error")
def cancel_disconnect(ctx):
if isinstance(ctx.original_exception, KeyboardInterrupt):
>>> from sqlalchemy import table, column, select, literal, exists
>>> orders = table(
- ... 'orders',
- ... column('region'),
- ... column('amount'),
- ... column('product'),
- ... column('quantity')
+ ... "orders", column("region"), column("amount"), column("product"), column("quantity")
... )
>>>
>>> upsert = (
... orders.update()
- ... .where(orders.c.region == 'Region1')
- ... .values(amount=1.0, product='Product1', quantity=1)
- ... .returning(*(orders.c._all_columns)).cte('upsert'))
+ ... .where(orders.c.region == "Region1")
+ ... .values(amount=1.0, product="Product1", quantity=1)
+ ... .returning(*(orders.c._all_columns))
+ ... .cte("upsert")
+ ... )
>>>
>>> insert = orders.insert().from_select(
... orders.c.keys(),
- ... select([
- ... literal('Region1'), literal(1.0),
- ... literal('Product1'), literal(1)
- ... ]).where(~exists(upsert.select()))
+ ... select([literal("Region1"), literal(1.0), literal("Product1"), literal(1)]).where(
+ ... ~exists(upsert.select())
+ ... ),
... )
>>>
>>> print(insert) # note formatting added for clarity
>>> from sqlalchemy import func
- >>> print(func.row_number().over(order_by='x', range_=(-5, 10)))
+ >>> print(func.row_number().over(order_by="x", range_=(-5, 10)))
row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND :param_2 FOLLOWING)
- >>> print(func.row_number().over(order_by='x', rows=(None, 0)))
+ >>> print(func.row_number().over(order_by="x", rows=(None, 0)))
row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
- >>> print(func.row_number().over(order_by='x', range_=(-2, None)))
+ >>> print(func.row_number().over(order_by="x", range_=(-2, None)))
row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING)
:paramref:`.expression.over.range_` and :paramref:`.expression.over.rows` are specified as
selectable, e.g. lateral correlation::
>>> from sqlalchemy import table, column, select, true
- >>> people = table('people', column('people_id'), column('age'), column('name'))
- >>> books = table('books', column('book_id'), column('owner_id'))
- >>> subq = select([books.c.book_id]).\
- ... where(books.c.owner_id == people.c.people_id).lateral("book_subq")
+ >>> people = table("people", column("people_id"), column("age"), column("name"))
+ >>> books = table("books", column("book_id"), column("owner_id"))
+ >>> subq = (
+ ... select([books.c.book_id])
+ ... .where(books.c.owner_id == people.c.people_id)
+ ... .lateral("book_subq")
+ ... )
>>> print(select([people]).select_from(people.join(subq, true())))
SELECT people.people_id, people.age, people.name
FROM people JOIN LATERAL (SELECT books.book_id AS book_id
from sqlalchemy import func
- selectable = people.tablesample(
- func.bernoulli(1),
- name='alias',
- seed=func.random())
+ selectable = people.tablesample(func.bernoulli(1), name="alias", seed=func.random())
stmt = select([selectable.c.people_id])
Assuming ``people`` with a column ``people_id``, the above
*composite* primary key; previously, a table definition such as::
Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True)
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True),
)
Would have "autoincrement" semantics applied to the ``'x'`` column, only
# old way
Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True, autoincrement=False),
- Column('y', Integer, primary_key=True, autoincrement=False)
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True, autoincrement=False),
+ Column("y", Integer, primary_key=True, autoincrement=False),
)
With the new behavior, the composite primary key will not have autoincrement
# column 'y' will be SERIAL/AUTO_INCREMENT/ auto-generating
Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, autoincrement=True)
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, autoincrement=True),
)
In order to anticipate some potential backwards-incompatible scenarios,
have autoincrement set up; given a table such as::
Table(
- 'b', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True)
+ "b",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True),
)
An INSERT emitted with no values for this table will produce this warning::
value generator can be indicated using :class:`.FetchedValue`::
Table(
- 'b', metadata,
- Column('x', Integer, primary_key=True, server_default=FetchedValue()),
- Column('y', Integer, primary_key=True, server_default=FetchedValue())
+ "b",
+ metadata,
+ Column("x", Integer, primary_key=True, server_default=FetchedValue()),
+ Column("y", Integer, primary_key=True, server_default=FetchedValue()),
)
For the very unlikely case where a composite primary key is actually intended
specify the column with ``nullable=True``::
Table(
- 'b', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, nullable=True)
+ "b",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, nullable=True),
)
In a related change, the ``autoincrement`` flag may be set to True
:meth:`.ColumnOperators.isnot_distinct_from` allow the IS DISTINCT
FROM and IS NOT DISTINCT FROM sql operation::
- >>> print(column('x').is_distinct_from(None))
+ >>> print(column("x").is_distinct_from(None))
x IS DISTINCT FROM NULL
Handling is provided for NULL, True and False::
- >>> print(column('x').isnot_distinct_from(False))
+ >>> print(column("x").isnot_distinct_from(False))
x IS NOT DISTINCT FROM false
For SQLite, which doesn't have this operator, "IS" / "IS NOT" is rendered,
which on SQLite works for NULL unlike other backends::
>>> from sqlalchemy.dialects import sqlite
- >>> print(column('x').is_distinct_from(None).compile(dialect=sqlite.dialect()))
+ >>> print(column("x").is_distinct_from(None).compile(dialect=sqlite.dialect()))
x IS NOT NULL
.. _change_1957:
from sqlalchemy import text
- stmt = text("SELECT users.id, addresses.id, users.id, "
- "users.name, addresses.email_address AS email "
- "FROM users JOIN addresses ON users.id=addresses.user_id "
- "WHERE users.id = 1").columns(
- User.id,
- Address.id,
- Address.user_id,
- User.name,
- Address.email_address
- )
-
- query = session.query(User).from_statement(stmt).\
- options(contains_eager(User.addresses))
+
+ stmt = text(
+ "SELECT users.id, addresses.id, users.id, "
+ "users.name, addresses.email_address AS email "
+ "FROM users JOIN addresses ON users.id=addresses.user_id "
+ "WHERE users.id = 1"
+ ).columns(User.id, Address.id, Address.user_id, User.name, Address.email_address)
+
+ query = session.query(User).from_statement(stmt).options(contains_eager(User.addresses))
result = query.all()
Above, the textual SQL contains the column "id" three times, which would
to rely upon "positional" matching more fully for compiled SQL constructs
as well. Given a statement like the following::
- ua = users.alias('ua')
+ ua = users.alias("ua")
stmt = select([users.c.user_id, ua.c.user_id])
The above statement will compile to::
ua_id = row[ua.c.user_id]
# this still raises, however
- user_id = row['user_id']
+ user_id = row["user_id"]
Much less likely to get an "ambiguous column" error message
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
three = 3
- t = Table(
- 'data', MetaData(),
- Column('value', Enum(MyEnum))
- )
+ t = Table("data", MetaData(), Column("value", Enum(MyEnum)))
e = create_engine("sqlite://")
t.create(e)
>>> from sqlalchemy import Table, MetaData, Column, Enum, create_engine
>>> t = Table(
- ... 'data', MetaData(),
- ... Column('value', Enum("one", "two", "three", validate_strings=True))
+ ... "data",
+ ... MetaData(),
+ ... Column("value", Enum("one", "two", "three", validate_strings=True)),
... )
>>> e = create_engine("sqlite://")
>>> t.create(e)
>>> from sqlalchemy import create_engine
>>> import random
- >>> e = create_engine("sqlite://", echo='debug')
- >>> some_value = ''.join(chr(random.randint(52, 85)) for i in range(5000))
+ >>> e = create_engine("sqlite://", echo="debug")
+ >>> some_value = "".join(chr(random.randint(52, 85)) for i in range(5000))
>>> row = e.execute("select ?", [some_value]).first()
... (lines are wrapped for clarity) ...
2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine select ?
json_value = Column(JSON(none_as_null=False), default="some default")
+
# would insert "some default" instead of "'null'",
# now will insert "'null'"
obj = MyObject(json_value=None)
some_other_value = Column(String(50))
json_value = Column(JSON(none_as_null=False))
+
# would result in NULL for some_other_value,
# but json "'null'" for json_value. Now results in NULL for both
# (the json_value is omitted from the INSERT)
# would insert SQL NULL and/or trigger defaults,
# now inserts "'null'"
- session.bulk_insert_mappings(
- MyObject,
- [{"json_value": None}])
+ session.bulk_insert_mappings(MyObject, [{"json_value": None}])
The :class:`_types.JSON` type now implements the
:attr:`.TypeEngine.should_evaluate_none` flag,
PostgreSQL**, however it can be used directly, supporting special array
use cases such as indexed access, as well as the ANY and ALL operators::
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer, dimensions=2))
- )
+ mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2)))
expr = mytable.c.data[5][6]
subq = select([mytable.c.value])
select([mytable]).where(12 > any_(subq))
-
:ticket:`3516`
.. _change_3132:
which is now available using :class:`_functions.array_agg`::
from sqlalchemy import func
+
stmt = select([func.array_agg(table.c.value)])
A PostgreSQL element for an aggregate ORDER BY is also added via
:class:`_postgresql.aggregate_order_by`::
from sqlalchemy.dialects.postgresql import aggregate_order_by
+
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
ensure the :class:`_postgresql.ARRAY` type::
from sqlalchemy.dialects.postgresql import array_agg
- stmt = select([array_agg(table.c.value).contains('foo')])
+ stmt = select([array_agg(table.c.value).contains("foo")])
Additionally, functions like ``percentile_cont()``, ``percentile_disc()``,
``rank()``, ``dense_rank()`` and others that require an ordering via
:meth:`.FunctionElement.within_group` modifier::
from sqlalchemy import func
- stmt = select([
- department.c.id,
- func.percentile_cont(0.5).within_group(
- department.c.salary.desc()
- )
- ])
+
+ stmt = select(
+ [
+ department.c.id,
+ func.percentile_cont(0.5).within_group(department.c.salary.desc()),
+ ]
+ )
The above statement would produce SQL similar to::
# old way
class MyEnum(TypeDecorator, SchemaType):
- impl = postgresql.ENUM('one', 'two', 'three', name='myenum')
+ impl = postgresql.ENUM("one", "two", "three", name="myenum")
def _set_table(self, table):
self.impl._set_table(table)
# new way
class MyEnum(TypeDecorator):
- impl = postgresql.ENUM('one', 'two', 'three', name='myenum')
-
+ impl = postgresql.ENUM("one", "two", "three", name="myenum")
:ticket:`2919`
For example, if the ``User`` class were assigned the schema "per_user"::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
- __table_args__ = {'schema': 'per_user'}
+ __table_args__ = {"schema": "per_user"}
On each request, the :class:`.Session` can be set up to refer to a
different schema::
session = Session()
- session.connection(execution_options={
- "schema_translate_map": {"per_user": "account_one"}})
+ session.connection(
+ execution_options={"schema_translate_map": {"per_user": "account_one"}}
+ )
# will query from the ``account_one.user`` table
session.query(User).get(5)
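
The map may also be established for all connections up front by applying
it to the :class:`_engine.Engine`; a minimal sketch, assuming an existing
``engine``::

    engine = engine.execution_options(
        schema_translate_map={"per_user": "account_one"}
    )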
table to an integer "id" column on the other::
class Person(Base):
- __tablename__ = 'person'
+ __tablename__ = "person"
id = Column(StringAsInt, primary_key=True)
pets = relationship(
- 'Pets',
+ "Pets",
primaryjoin=(
- 'foreign(Pets.person_id)'
- '==cast(type_coerce(Person.id, Integer), Integer)'
- )
+ "foreign(Pets.person_id)" "==cast(type_coerce(Person.id, Integer), Integer)"
+ ),
)
+
class Pets(Base):
- __tablename__ = 'pets'
- id = Column('id', Integer, primary_key=True)
- person_id = Column('person_id', Integer)
+ __tablename__ = "pets"
+ id = Column("id", Integer, primary_key=True)
+ person_id = Column("person_id", Integer)
Above, in the :paramref:`_orm.relationship.primaryjoin` expression, we are
using :func:`.type_coerce` to handle bound parameters passed via
class MyObject(Base):
# ...
- json_value = Column(
- JSON(none_as_null=False), nullable=False, default=JSON.NULL)
+ json_value = Column(JSON(none_as_null=False), nullable=False, default=JSON.NULL)
Or, ensure the value is present on the object::
# default=None is the same as omitting it entirely, does not apply JSON NULL
json_value = Column(JSON(none_as_null=False), nullable=False, default=None)
-
.. seealso::
:ref:`change_3514`
A query such as the following will now augment only those columns
that are missing from the SELECT list, without duplicates::
- q = session.query(User.id, User.name.label('name')).\
- distinct().\
- order_by(User.id, User.name, User.fullname)
+ q = (
+ session.query(User.id, User.name.label("name"))
+ .distinct()
+ .order_by(User.id, User.name, User.fullname)
+ )
Produces::
last defined validator::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String)
def _validate_data_two(self):
assert "y" in data
+
configure_mappers()
Will raise::
>>> from sqlalchemy.schema import MetaData, Table, Column, CreateTable
>>> from sqlalchemy.types import String
- >>> t = Table('t', MetaData(), Column('x', String(), server_default="hi ' there"))
+ >>> t = Table("t", MetaData(), Column("x", String(), server_default="hi ' there"))
>>> print(CreateTable(t))
CREATE TABLE t (
one less dimension. Given a column with type ``ARRAY(Integer, dimensions=3)``,
we can now perform this expression::
- int_expr = col[5][6][7] # returns an Integer expression object
+ int_expr = col[5][6][7] # returns an Integer expression object
Previously, the indexed access to ``col[5]`` would return an expression of
type :class:`.Integer` where we could no longer perform indexed access
the :class:`_postgresql.ARRAY` type, this means that it is now straightforward
to produce JSON expressions with multiple levels of indexed access::
- json_expr = json_col['key1']['attr1'][5]
+ json_expr = json_col["key1"]["attr1"][5]
* The "textual" type that is returned by indexed access of :class:`.HSTORE`
as well as the "textual" type that is returned by indexed access of
This means that in most cases, an application that was doing this::
- expr = json_col['somekey'].cast(Integer)
+ expr = json_col["somekey"].cast(Integer)
Will now need to change to this::
- expr = json_col['somekey'].astext.cast(Integer)
-
+ expr = json_col["somekey"].astext.cast(Integer)
.. _change_2729:
as expected::
enum = Enum(
- 'manager', 'place_admin', 'carwash_admin',
- 'parking_admin', 'service_admin', 'tire_admin',
- 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles")
+ "manager",
+ "place_admin",
+ "carwash_admin",
+ "parking_admin",
+ "service_admin",
+ "tire_admin",
+ "mechanic",
+ "carwasher",
+ "tire_mechanic",
+ name="work_place_roles",
+ )
+
class WorkPlacement(Base):
- __tablename__ = 'work_placement'
+ __tablename__ = "work_placement"
id = Column(Integer, primary_key=True)
roles = Column(ARRAY(enum))
allows specification of which sub-types of views should be returned::
from sqlalchemy import inspect
+
insp = inspect(engine)
- plain_views = insp.get_view_names(include='plain')
- all_views = insp.get_view_names(include=('plain', 'materialized'))
+ plain_views = insp.get_view_names(include="plain")
+ all_views = insp.get_view_names(include=("plain", "materialized"))
:ticket:`3588`
parameters::
connection = engine.connect()
- connection = connection.execution_options(
- isolation_level="AUTOCOMMIT"
- )
+ connection = connection.execution_options(isolation_level="AUTOCOMMIT")
The isolation level makes use of the various "autocommit" attributes
provided by most MySQL DBAPIs.
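
The same option may be applied to all connections of an
:class:`_engine.Engine` by passing it to :func:`_sa.create_engine`; a
minimal sketch, assuming a MySQL URL::

    engine = create_engine(
        "mysql+pymysql://scott:tiger@localhost/test",
        isolation_level="AUTOCOMMIT",
    )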
not the first column, e.g.::
t = Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True, autoincrement=False),
- Column('y', Integer, primary_key=True, autoincrement=True),
- mysql_engine='InnoDB'
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True, autoincrement=False),
+ Column("y", Integer, primary_key=True, autoincrement=True),
+ mysql_engine="InnoDB",
)
DDL such as the following would be generated::
(along with a KEY for the autoincrement column as required by MySQL), e.g.::
t = Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, autoincrement=True),
- PrimaryKeyConstraint('x', 'y'),
- UniqueConstraint('y'),
- mysql_engine='InnoDB'
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, autoincrement=True),
+ PrimaryKeyConstraint("x", "y"),
+ UniqueConstraint("y"),
+ mysql_engine="InnoDB",
)
Along with the change :ref:`change_3216`, composite primary keys with
directives are no longer needed::
t = Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, autoincrement=True),
- mysql_engine='InnoDB'
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, autoincrement=True),
+ mysql_engine="InnoDB",
)
-
-
Dialect Improvements and Changes - SQLite
=========================================
``SNAPSHOT``::
engine = create_engine(
- "mssql+pyodbc://scott:tiger@ms_2008",
- isolation_level="REPEATABLE READ"
+ "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
)
.. seealso::
copy the "length" parameter as the value ``"max"``::
>>> from sqlalchemy import create_engine, inspect
- >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True)
+ >>> engine = create_engine("mssql+pyodbc://scott:tiger@ms_2008", echo=True)
>>> engine.execute("create table s (x varchar(max), y varbinary(max))")
>>> insp = inspect(engine)
>>> for col in insp.get_columns("s"):
- ... print(col['type'].__class__, col['type'].length)
- ...
+ ... print(col["type"].__class__, col["type"].length)
<class 'sqlalchemy.sql.sqltypes.VARCHAR'> max
<class 'sqlalchemy.dialects.mssql.base.VARBINARY'> max
out as None, so that the type objects work in non-SQL Server contexts::
>>> for col in insp.get_columns("s"):
- ... print(col['type'].__class__, col['type'].length)
- ...
+ ... print(col["type"].__class__, col["type"].length)
<class 'sqlalchemy.sql.sqltypes.VARCHAR'> None
<class 'sqlalchemy.dialects.mssql.base.VARBINARY'> None
given a table such as::
account_table = Table(
- 'account', metadata,
- Column('id', Integer, primary_key=True),
- Column('info', String(100)),
- schema="customer_schema"
+ "account",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("info", String(100)),
+ schema="customer_schema",
)
The legacy mode of behavior will attempt to turn a schema-qualified table
Given a query as below::
- q = session.query(User).\
- filter(User.name.like('%ed%')).\
- options(subqueryload(User.addresses))
+ q = (
+ session.query(User)
+ .filter(User.name.like("%ed%"))
+ .options(subqueryload(User.addresses))
+ )
The SQL produced would be the query against ``User`` followed by the
subqueryload for ``User.addresses`` (note the parameters are also listed)::
With "selectin" loading, we instead get a SELECT that refers to the
actual primary key values loaded in the parent query::
- q = session.query(User).\
- filter(User.name.like('%ed%')).\
- options(selectinload(User.addresses))
+ q = (
+ session.query(User)
+ .filter(User.name.like("%ed%"))
+ .options(selectinload(User.addresses))
+ )
Produces::
from sqlalchemy.orm import query_expression
from sqlalchemy.orm import with_expression
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
# will be None normally...
expr = query_expression()
+
# but let's give it x + y
- a1 = session.query(A).options(
- with_expression(A.expr, A.x + A.y)).first()
+ a1 = session.query(A).options(with_expression(A.expr, A.x + A.y)).first()
print(a1.expr)
.. seealso::
a FROM clause (or equivalent, depending on backend)
against ``SomeOtherEntity``::
- query(SomeEntity).\
- filter(SomeEntity.id==SomeOtherEntity.id).\
- filter(SomeOtherEntity.foo=='bar').\
- delete()
+ query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter(
+ SomeOtherEntity.foo == "bar"
+ ).delete()
.. seealso::
@hybrid.hybrid_property
def name(self):
- return self.first_name + ' ' + self.last_name
+ return self.first_name + " " + self.last_name
@name.expression
def name(cls):
- return func.concat(cls.first_name, ' ', cls.last_name)
+ return func.concat(cls.first_name, " ", cls.last_name)
@name.update_expression
def name(cls, value):
- f, l = value.split(' ', 1)
+ f, l = value.split(" ", 1)
return [(cls.first_name, f), (cls.last_name, l)]
Above, an UPDATE can be rendered using::
- session.query(Person).filter(Person.id == 5).update(
- {Person.name: "Dr. No"})
+ session.query(Person).filter(Person.id == 5).update({Person.name: "Dr. No"})
Similar functionality is available for composites, where composite values
will be broken out into their individual columns for bulk UPDATE::
session.query(Vertex).update({Vertex.start: Point(3, 4)})
-
.. seealso::
:ref:`hybrid_bulk_update`
def name(self, value):
self.first_name = value
+
class FirstNameLastName(FirstNameOnly):
# ...
@FirstNameOnly.name.getter
def name(self):
- return self.first_name + ' ' + self.last_name
+ return self.first_name + " " + self.last_name
@name.setter
def name(self, value):
- self.first_name, self.last_name = value.split(' ', maxsplit=1)
+ self.first_name, self.last_name = value.split(" ", maxsplit=1)
@name.expression
def name(cls):
- return func.concat(cls.first_name, ' ', cls.last_name)
+ return func.concat(cls.first_name, " ", cls.last_name)
Above, the ``FirstNameOnly.name`` hybrid is referenced by the
``FirstNameLastName`` subclass in order to repurpose it specifically to the
def _set_name(self, value):
self.first_name = value
+
class FirstNameOnly(Base):
@hybrid_property
def name(self):
from sqlalchemy.orm.attributes import OP_BULK_REPLACE
+
@event.listens_for(SomeObject.collection, "bulk_replace")
def process_collection(target, values, initiator):
values[:] = [_make_value(value) for value in values]
+
@event.listens_for(SomeObject.collection, "append", retval=True)
def process_collection(target, value, initiator):
# make sure bulk_replace didn't already do it
else:
return value
-
:ticket:`3896`
.. _change_3303:
Base = declarative_base()
+
class MyDataClass(Base):
- __tablename__ = 'my_data'
+ __tablename__ = "my_data"
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
+
@event.listens_for(MyDataClass.data, "modified")
def modified_json(instance):
print("json value modified:", instance.data)
model = session.query(MyModel).first()
model.json_set &= {1, 3}
-
:ticket:`3853`
.. _change_3769:
itself an association proxy onto ``B``::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b_values = association_proxy("atob", "b_value")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
value = Column(String)
c = relationship("C")
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
+ b_id = Column(ForeignKey("b.id"))
value = Column(String)
class AtoB(Base):
- __tablename__ = 'atob'
+ __tablename__ = "atob"
- a_id = Column(ForeignKey('a.id'), primary_key=True)
- b_id = Column(ForeignKey('b.id'), primary_key=True)
+ a_id = Column(ForeignKey("a.id"), primary_key=True)
+ b_id = Column(ForeignKey("b.id"), primary_key=True)
a = relationship("A", backref="atob")
b = relationship("B", backref="atob")
.. sourcecode:: pycon+sql
- >>> s.query(A).filter(A.b_values.contains('hi')).all()
+ >>> s.query(A).filter(A.b_values.contains("hi")).all()
{opensql}SELECT a.id AS a_id
FROM a
WHERE EXISTS (SELECT 1
.. sourcecode:: pycon+sql
- >>> s.query(A).filter(A.c_values.any(value='x')).all()
+ >>> s.query(A).filter(A.c_values.any(value="x")).all()
{opensql}SELECT a.id AS a_id
FROM a
WHERE EXISTS (SELECT 1
field tracks this difference so that the two objects can co-exist in the
same identity map::
- tokyo = WeatherLocation('Asia', 'Tokyo')
- newyork = WeatherLocation('North America', 'New York')
+ tokyo = WeatherLocation("Asia", "Tokyo")
+ newyork = WeatherLocation("North America", "New York")
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]
- assert inspect(newyork_report).identity_key == (Report, (1, ), "north_america")
- assert inspect(tokyo_report).identity_key == (Report, (1, ), "asia")
+ assert inspect(newyork_report).identity_key == (Report, (1,), "north_america")
+ assert inspect(tokyo_report).identity_key == (Report, (1,), "asia")
# the token representing the originating shard is also available directly
assert inspect(newyork_report).identity_token == "north_america"
assert inspect(tokyo_report).identity_token == "asia"
-
:ticket:`4137`
New Features and Improvements - Core
from sqlalchemy import Boolean
from sqlalchemy import TypeDecorator
+
class LiberalBoolean(TypeDecorator):
impl = Boolean
value = bool(int(value))
return value
-
:ticket:`4102`
.. _change_3919:
have the effect of more parenthesization being generated when comparison
operators are combined together, such as::
- (column('q') == null()) != (column('y') == null())
+ (column("q") == null()) != (column("y") == null())
Will now generate ``(q IS NULL) != (y IS NULL)`` rather than
``q IS NULL != y IS NULL``.
:paramref:`_schema.Column.comment` arguments::
Table(
- 'my_table', metadata,
- Column('q', Integer, comment="the Q value"),
- comment="my Q table"
+ "my_table",
+ metadata,
+ Column("q", Integer, comment="the Q value"),
+ comment="my Q table",
)
Above, DDL will be rendered appropriately upon table create to associate
Given a statement as::
- stmt = users.delete().\
- where(users.c.id == addresses.c.id).\
- where(addresses.c.email_address.startswith('ed%'))
+ stmt = (
+ users.delete()
+ .where(users.c.id == addresses.c.id)
+ .where(addresses.c.email_address.startswith("ed%"))
+ )
conn.execute(stmt)
The resulting SQL from the above statement on a PostgreSQL backend
An expression such as::
- >>> column('x').startswith('total%score', autoescape=True)
+ >>> column("x").startswith("total%score", autoescape=True)
Renders as::
Similarly, an expression that contains the escape character itself::
- >>> column('x').startswith('total/score', autoescape=True)
+ >>> column("x").startswith("total/score", autoescape=True)
Will render the same way, with the value of the parameter "x_1" as
``'total//score'``.
float_value = connection.scalar(
- select([literal(4.56)]) # the "BindParameter" will now be
- # Float, not Numeric(asdecimal=True)
+ select([literal(4.56)]) # the "BindParameter" will now be
+ # Float, not Numeric(asdecimal=True)
)
* Math operations between :class:`.Numeric`, :class:`.Float`, and
as well as if the type should be :class:`.Float`::
# asdecimal flag is maintained
- expr = column('a', Integer) * column('b', Numeric(asdecimal=False))
+ expr = column("a", Integer) * column("b", Numeric(asdecimal=False))
assert expr.type.asdecimal == False
# Float subclass of Numeric is maintained
- expr = column('a', Integer) * column('b', Float())
+ expr = column("a", Integer) * column("b", Float())
assert isinstance(expr.type, Float)
* The :class:`.Float` datatype will apply the ``float()`` processor to
are named in the documentation now::
>>> from sqlalchemy import select, table, column, func, tuple_
- >>> t = table('t',
- ... column('value'), column('x'),
- ... column('y'), column('z'), column('q'))
+ >>> t = table("t", column("value"), column("x"), column("y"), column("z"), column("q"))
>>> stmt = select([func.sum(t.c.value)]).group_by(
... func.grouping_sets(
... tuple_(t.c.x, t.c.y),
def mydefault(context):
- return context.get_current_parameters()['counter'] + 12
+ return context.get_current_parameters()["counter"] + 12
- mytable = Table('mytable', metadata_obj,
- Column('counter', Integer),
- Column('counter_plus_twelve',
- Integer, default=mydefault, onupdate=mydefault)
+
+ mytable = Table(
+ "mytable",
+ metadata_obj,
+ Column("counter", Integer),
+ Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault),
)
- stmt = mytable.insert().values(
- [{"counter": 5}, {"counter": 18}, {"counter": 20}])
+ stmt = mytable.insert().values([{"counter": 5}, {"counter": 18}, {"counter": 20}])
conn.execute(stmt)
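Each row's ``counter_plus_twelve`` value is computed from that row's own
parameters, so the three rows above receive 17, 30 and 32 respectively.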
sess = Session()
- user = sess.query(User).filter_by(name='x').first()
+ user = sess.query(User).filter_by(name="x").first()
+
@event.listens_for(sess, "after_rollback")
def after_rollback(session):
# to emit a lazy load.
print("user name: %s" % user.name)
+
@event.listens_for(sess, "after_commit")
def after_commit(session):
# 'user.name' is present, assuming it was already
# loaded. this is the existing behavior.
print("user name: %s" % user.name)
+
if should_rollback:
sess.rollback()
else:
the previous collection, a side effect of this was that the collection
being replaced would also be mutated, which is misleading and unnecessary::
- >>> a1, a2, a3 = Address('a1'), Address('a2'), Address('a3')
+ >>> a1, a2, a3 = Address("a1"), Address("a2"), Address("a3")
>>> user.addresses = [a1, a2]
>>> previous_collection = user.addresses
Given a mapping as::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
- @validates('bs')
+ @validates("bs")
def convert_dict_to_b(self, key, value):
- return B(data=value['data'])
+ return B(data=value["data"])
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
data = Column(String)
Above, we could use the validator as follows, to convert from an incoming
class A(Base):
# ...
- @validates('bs')
+ @validates("bs")
def validate_b(self, key, value):
assert value.data is not None
return value
An exception is now raised if the :func:`.attributes.flag_modified` function
is used to mark an attribute as modified that isn't actually loaded::
- a1 = A(data='adf')
+ a1 = A(data="adf")
s.add(a1)
s.flush()
# expire, similarly as though we said s.commit()
- s.expire(a1, 'data')
+ s.expire(a1, "data")
# will raise InvalidRequestError
- attributes.flag_modified(a1, 'data')
+ attributes.flag_modified(a1, "data")
This is because the flush process will most likely fail in any case if the
attribute is still not present by the time flush occurs. To mark an object
A very old and undocumented keyword argument ``scope`` has been removed::
from sqlalchemy.orm import scoped_session
+
Session = scoped_session(sessionmaker())
session = Session(scope=None)
overwrite it::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- favorite_b_id = Column(ForeignKey('b.id', name="favorite_b_fk"))
+ favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk"))
bs = relationship("B", primaryjoin="A.id == B.a_id")
favorite_b = relationship(
- "B", primaryjoin="A.favorite_b_id == B.id", post_update=True)
+ "B", primaryjoin="A.favorite_b_id == B.id", post_update=True
+ )
updated = Column(Integer, onupdate=my_onupdate_function)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id', name="a_fk"))
+ a_id = Column(ForeignKey("a.id", name="a_fk"))
+
a1 = A()
b1 = B()
Given a mapping::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
version_id = Column(Integer, default=0)
- parent_id = Column(ForeignKey('node.id'))
- favorite_node_id = Column(ForeignKey('node.id'))
+ parent_id = Column(ForeignKey("node.id"))
+ favorite_node_id = Column(ForeignKey("node.id"))
nodes = relationship("Node", primaryjoin=remote(parent_id) == id)
favorite_node = relationship(
- "Node", primaryjoin=favorite_node_id == remote(id),
- post_update=True
+ "Node", primaryjoin=favorite_node_id == remote(id), post_update=True
)
- __mapper_args__ = {
- 'version_id_col': version_id
- }
+ __mapper_args__ = {"version_id_col": version_id}
An UPDATE of a node that associates another node as "favorite" will
now increment the version counter as well as match the current version::
a result with no return type (assume ``-%>`` is some special operator
supported by the database)::
- >>> column('x', types.DateTime).op('-%>')(None).type
+ >>> column("x", types.DateTime).op("-%>")(None).type
NullType()
Other types would use the default behavior of using the left-hand type
as the return type::
- >>> column('x', types.String(50)).op('-%>')(None).type
+ >>> column("x", types.String(50)).op("-%>")(None).type
String(length=50)
These behaviors were mostly accidental, so the behavior has been made
consistent with the second form; that is, the default return type is the
same as the left-hand expression::
- >>> column('x', types.DateTime).op('-%>')(None).type
+ >>> column("x", types.DateTime).op("-%>")(None).type
DateTime()
As most user-defined operators tend to be "comparison" operators, often
its documented behavior of allowing the return type to be :class:`.Boolean`
in all cases, including for :class:`_types.ARRAY` and :class:`_types.JSON`::
- >>> column('x', types.String(50)).op('-%>', is_comparison=True)(None).type
+ >>> column("x", types.String(50)).op("-%>", is_comparison=True)(None).type
Boolean()
- >>> column('x', types.ARRAY(types.Integer)).op('-%>', is_comparison=True)(None).type
+ >>> column("x", types.ARRAY(types.Integer)).op("-%>", is_comparison=True)(None).type
Boolean()
- >>> column('x', types.JSON()).op('-%>', is_comparison=True)(None).type
+ >>> column("x", types.JSON()).op("-%>", is_comparison=True)(None).type
Boolean()
To assist with boolean comparison operators, a new shorthand method
:meth:`.Operators.bool_op` has been added. This method should be preferred
for on-the-fly boolean operators::
- >>> print(column('x', types.Integer).bool_op('-%>')(5))
+ >>> print(column("x", types.Integer).bool_op("-%>")(5))
x -%> :x_1
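Because :meth:`.Operators.bool_op` is shorthand for the comparison form of
:meth:`.Operators.op`, the resulting expression type is :class:`.Boolean` as
well; a quick check::

    from sqlalchemy import column, types

    expr = column("x", types.Integer).bool_op("-%>")(5)
    assert isinstance(expr.type, types.Boolean)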
construct that stated a single percent sign::
>>> from sqlalchemy import literal_column
- >>> print(literal_column('some%symbol'))
+ >>> print(literal_column("some%symbol"))
some%%symbol
The percent sign is now unaffected for dialects that are not set to
as is appropriate::
>>> from sqlalchemy import literal_column
- >>> print(literal_column('some%symbol'))
+ >>> print(literal_column("some%symbol"))
some%symbol
>>> from sqlalchemy.dialects import mysql
- >>> print(literal_column('some%symbol').compile(dialect=mysql.dialect()))
+ >>> print(literal_column("some%symbol").compile(dialect=mysql.dialect()))
some%%symbol
As part of this change, the doubling that has been present when using
functions, used to supply ad-hoc column collations at the statement level,
is fixed, where a case sensitive name would not be quoted::
- stmt = select([mytable.c.x, mytable.c.y]).\
- order_by(mytable.c.somecolumn.collate("fr_FR"))
+ stmt = select([mytable.c.x, mytable.c.y]).order_by(
+ mytable.c.somecolumn.collate("fr_FR")
+ )
now renders::
``use_batch_mode`` argument on :func:`_sa.create_engine`::
engine = create_engine(
- "postgresql+psycopg2://scott:tiger@host/dbname",
- use_batch_mode=True)
+ "postgresql+psycopg2://scott:tiger@host/dbname", use_batch_mode=True
+ )
The feature is considered to be experimental for the moment but may become
on by default in a future release.
from sqlalchemy.dialects.postgresql import INTERVAL
- Table(
- 'my_table', metadata,
- Column("some_interval", INTERVAL(fields="DAY TO SECOND"))
- )
+ Table("my_table", metadata, Column("some_interval", INTERVAL(fields="DAY TO SECOND")))
Additionally, all INTERVAL datatypes can now be reflected independently
of the "fields" specifier present; the "fields" parameter in the datatype
from sqlalchemy.dialects.mysql import insert
- insert_stmt = insert(my_table). \
- values(id='some_id', data='some data to insert')
+ insert_stmt = insert(my_table).values(id="some_id", data="some data to insert")
on_conflict_stmt = insert_stmt.on_duplicate_key_update(
- data=insert_stmt.inserted.data,
- status='U'
+ data=insert_stmt.inserted.data, status="U"
)
conn.execute(on_conflict_stmt)
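On MySQL, the statement above renders roughly as follows (exact parameter
markers vary by driver)::

    INSERT INTO my_table (id, data) VALUES (%s, %s)
    ON DUPLICATE KEY UPDATE data = VALUES(data), status = %s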
Previously, the foreign keys result would look like::
- [{'referred_table': u'users', 'referred_columns': [u'id'],
- 'referred_schema': None, 'name': 'USER_ID_FK',
- 'constrained_columns': [u'user_id']}]
+ [
+ {
+ "referred_table": "users",
+ "referred_columns": ["id"],
+ "referred_schema": None,
+ "name": "USER_ID_FK",
+ "constrained_columns": ["user_id"],
+ }
+ ]
The above format could create problems, particularly with Alembic autogenerate.
occurs, allowing database and/or owner names that themselves contain one
or more dots::
- Table(
- "some_table", metadata,
- Column("q", String(50)),
- schema="[MyDataBase.dbo]"
- )
+ Table("some_table", metadata, Column("q", String(50)), schema="[MyDataBase.dbo]")
The above table will consider the "owner" to be ``MyDataBase.dbo``, which
will also be quoted upon render, and the "database" as None. To individually
refer to database name and owner, use two pairs of brackets::
Table(
- "some_table", metadata,
+ "some_table",
+ metadata,
Column("q", String(50)),
- schema="[MyDataBase.SomeDB].[MyDB.owner]"
+ schema="[MyDataBase.SomeDB].[MyDB.owner]",
)
Additionally, the :class:`.quoted_name` construct is now honored when
j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id)
B_viacd = mapper(
- B, j, non_primary=True, primary_key=[j.c.b_id],
+ B,
+ j,
+ non_primary=True,
+ primary_key=[j.c.b_id],
properties={
"id": j.c.b_id, # so that 'id' looks the same as before
- "c_id": j.c.c_id, # needed for disambiguation
+ "c_id": j.c.c_id, # needed for disambiguation
"d_c_id": j.c.d_c_id, # needed for disambiguation
"b_id": [j.c.b_id, j.c.d_b_id],
"d_id": j.c.d_id,
- }
+ },
)
A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id)
Given a mapping::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", lazy="selectin")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
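A sketch of the effect, assuming a working ``session``: the ``bs`` collections
are loaded up front by a second query that places the primary keys of the
``A`` rows just loaded into an IN clause, so iteration emits no further SQL::

    for a in session.query(A).all():
        a.bs  # already populated; no per-object lazy load is emitted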
some_object = session.query(SomeObject).get(5)
- del some_object.some_attribute # from a SQL perspective, works like "= None"
+ del some_object.some_attribute # from a SQL perspective, works like "= None"
:ticket:`4354`
from sqlalchemy import inspect
- u1 = User(id=7, name='ed')
-
- inspect(u1).info['user_info'] = '7|ed'
+ u1 = User(id=7, name="ed")
+ inspect(u1).info["user_info"] = "7|ed"
:ticket:`4257`
Given a mapping as::
class A(Base):
- __tablename__ = 'test_a'
+ __tablename__ = "test_a"
id = Column(Integer, primary_key=True)
- ab = relationship(
- 'AB', backref='a', uselist=False)
+ ab = relationship("AB", backref="a", uselist=False)
b = association_proxy(
- 'ab', 'b', creator=lambda b: AB(b=b),
- cascade_scalar_deletes=True)
+ "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True
+ )
class B(Base):
- __tablename__ = 'test_b'
+ __tablename__ = "test_b"
id = Column(Integer, primary_key=True)
- ab = relationship('AB', backref='b', cascade='all, delete-orphan')
+ ab = relationship("AB", backref="b", cascade="all, delete-orphan")
class AB(Base):
- __tablename__ = 'test_ab'
+ __tablename__ = "test_ab"
a_id = Column(Integer, ForeignKey(A.id), primary_key=True)
b_id = Column(Integer, ForeignKey(B.id), primary_key=True)
class User(Base):
# ...
- keywords = association_proxy('kws', 'keyword')
+ keywords = association_proxy("kws", "keyword")
proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User)
# column-based association proxy
values = association_proxy("elements", "value")
+
class Element(Base):
# ...
The ``User.values`` association proxy refers to the ``Element.value`` column.
Standard column operations are now available, such as ``like``::
- >>> print(s.query(User).filter(User.values.like('%foo%')))
+ >>> print(s.query(User).filter(User.values.like("%foo%")))
SELECT "user".id AS user_id
FROM "user"
WHERE EXISTS (SELECT 1
``equals``::
- >>> print(s.query(User).filter(User.values == 'foo'))
+ >>> print(s.query(User).filter(User.values == "foo"))
SELECT "user".id AS user_id
FROM "user"
WHERE EXISTS (SELECT 1
the association proxy used ``.contains`` as a list containment operator only.
With a column-oriented comparison, it now behaves like a "like"::
- >>> print(s.query(User).filter(User.values.contains('foo')))
+ >>> print(s.query(User).filter(User.values.contains("foo")))
SELECT "user".id AS user_id
FROM "user"
WHERE EXISTS (SELECT 1
as before, that of testing for collection membership, e.g. given a mapping::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
user_elements = relationship("UserElement")
class UserElement(Base):
- __tablename__ = 'user_element'
+ __tablename__ = "user_element"
id = Column(Integer, primary_key=True)
user_id = Column(ForeignKey("user.id"))
class Element(Base):
- __tablename__ = 'element'
+ __tablename__ = "element"
id = Column(Integer, primary_key=True)
value = Column(String)
As an example, given a mapping with association proxy::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
- b_data = association_proxy('bs', 'data')
+ b_data = association_proxy("bs", "data")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
data = Column(String)
- a1 = A(bs=[B(data='b1'), B(data='b2')])
+ a1 = A(bs=[B(data="b1"), B(data="b2")])
b_data = a1.b_data
The change is that the ``b_data`` collection is now maintaining a strong
reference to the ``a1`` object, so that it remains present::
- assert b_data == ['b1', 'b2']
+ assert b_data == ["b1", "b2"]
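For instance, the parent remains reachable even after our own reference to it
goes away; a minimal sketch, assuming CPython garbage collection::

    import gc

    del a1  # drop the only direct reference to the parent
    gc.collect()

    # the proxy collection keeps the parent alive, so this still passes
    assert b_data == ["b1", "b2"]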
This change introduces the side effect that if an application is passing around
the collection as above, **the parent object won't be garbage collected** until
id = Column(Integer, primary_key=True)
b_rel = relationship(
- "B", collection_class=set, cascade="all, delete-orphan",
+ "B",
+ collection_class=set,
+ cascade="all, delete-orphan",
)
b = association_proxy("b_rel", "value", creator=lambda x: B(value=x))
a_id = Column(Integer, ForeignKey("test_a.id"), nullable=False)
value = Column(String)
+
# ...
s = Session(e)
# against the deleted ones.
assert len(s.new) == 1
-
:ticket:`2642`
.. _change_1103:
"swap" operation. Given a standard one-to-many/many-to-one setup::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", backref="a")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
>>> del a1.bs[1]
>>> a1.bs # collection is unaffected so far...
[<__main__.B object at 0x7f047af5fb70>]
- >>> b1.a # however b1.a is None
+ >>> b1.a # however b1.a is None
>>>
>>> session.add(a1)
>>> session.commit() # so upon flush + expire....
one-to-one relationships, in the following situation::
class User(Base):
- __tablename__ = 'users'
+ __tablename__ = "users"
id = Column(Integer, primary_key=True)
- addresses = relationship(
- "Address",
- passive_deletes="all")
+ addresses = relationship("Address", passive_deletes="all")
+
class Address(Base):
- __tablename__ = 'addresses'
+ __tablename__ = "addresses"
id = Column(Integer, primary_key=True)
email = Column(String)
- user_id = Column(Integer, ForeignKey('users.id'))
+ user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("User")
+
u1 = session.query(User).first()
address = u1.addresses[0]
u1.addresses.remove(address)
separator. Below we define a convention that will name :class:`.UniqueConstraint`
constraints with a name that joins together the names of all columns::
- metadata_obj = MetaData(naming_convention={
- "uq": "uq_%(table_name)s_%(column_0_N_name)s"
- })
+ metadata_obj = MetaData(
+ naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"}
+ )
table = Table(
- 'info', metadata_obj,
- Column('a', Integer),
- Column('b', Integer),
- Column('c', Integer),
- UniqueConstraint('a', 'b', 'c')
+ "info",
+ metadata_obj,
+ Column("a", Integer),
+ Column("b", Integer),
+ Column("c", Integer),
+ UniqueConstraint("a", "b", "c"),
)
The CREATE TABLE for the above table will render as::
constraint name would normally be generated from the table definition below::
long_names = Table(
- 'long_names', metadata_obj,
- Column('information_channel_code', Integer, key='a'),
- Column('billing_convention_name', Integer, key='b'),
- Column('product_identifier', Integer, key='c'),
- UniqueConstraint('a', 'b', 'c')
+ "long_names",
+ metadata_obj,
+ Column("information_channel_code", Integer, key="a"),
+ Column("billing_convention_name", Integer, key="b"),
+ Column("product_identifier", Integer, key="c"),
+ UniqueConstraint("a", "b", "c"),
)
The truncation logic will ensure a too-long name isn't generated for the
side::
class Venue(Base):
- __tablename__ = 'venue'
+ __tablename__ = "venue"
id = Column(Integer, primary_key=True)
name = Column(String)
descendants = relationship(
"Venue",
- primaryjoin=func.instr(
- remote(foreign(name)), name + "/"
- ).as_comparison(1, 2) == 1,
+ primaryjoin=func.instr(remote(foreign(name)), name + "/").as_comparison(1, 2)
+ == 1,
viewonly=True,
- order_by=name
+ order_by=name,
)
Above, the :paramref:`_orm.relationship.primaryjoin` of the "descendants" relationship
and a joinedload, such as::
- v1 = s.query(Venue).filter_by(name="parent1").options(
- joinedload(Venue.descendants)).one()
+ v1 = (
+ s.query(Venue)
+ .filter_by(name="parent1")
+ .options(joinedload(Venue.descendants))
+ .one()
+ )
to work as::
>>> from sqlalchemy import select, literal_column, bindparam
>>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
>>> with e.connect() as conn:
- ... conn.execute(
- ... select([literal_column('1')]).
- ... where(literal_column('1').in_(bindparam('q', expanding=True))),
- ... q=[]
- ... )
- ...
+ ... conn.execute(
+ ... select([literal_column("1")]).where(
+ ... literal_column("1").in_(bindparam("q", expanding=True))
+ ... ),
+ ... q=[],
+ ... )
SELECT 1 WHERE 1 IN (SELECT CAST(NULL AS INTEGER) WHERE 1!=1)
The feature also works for tuple-oriented IN statements, where the "empty IN"
>>> from sqlalchemy import select, literal_column, tuple_, bindparam
>>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
>>> with e.connect() as conn:
- ... conn.execute(
- ... select([literal_column('1')]).
- ... where(tuple_(50, "somestring").in_(bindparam('q', expanding=True))),
- ... q=[]
- ... )
- ...
+ ... conn.execute(
+ ... select([literal_column("1")]).where(
+ ... tuple_(50, "somestring").in_(bindparam("q", expanding=True))
+ ... ),
+ ... q=[],
+ ... )
SELECT 1 WHERE (%(param_1)s, %(param_2)s)
IN (SELECT CAST(NULL AS INTEGER), CAST(NULL AS VARCHAR) WHERE 1!=1)
from sqlalchemy import TypeDecorator, LargeBinary, func
+
class CompressedLargeBinary(TypeDecorator):
impl = LargeBinary
def column_expression(self, col):
return func.uncompress(col, type_=self)
+
MyLargeBinary = LargeBinary().with_variant(CompressedLargeBinary(), "sqlite")
The above expression will render a function within SQL when used on SQLite only::
from sqlalchemy import select, column
from sqlalchemy.dialects import sqlite
- print(select([column('x', CompressedLargeBinary)]).compile(dialect=sqlite.dialect()))
+
+ print(select([column("x", CompressedLargeBinary)]).compile(dialect=sqlite.dialect()))
will render::
Given a schema such as::
dv = Table(
- 'data_values', metadata_obj,
- Column('modulus', Integer, nullable=False),
- Column('data', String(30)),
- postgresql_partition_by='range(modulus)')
+ "data_values",
+ metadata_obj,
+ Column("modulus", Integer, nullable=False),
+ Column("data", String(30)),
+ postgresql_partition_by="range(modulus)",
+ )
sa.event.listen(
dv,
"after_create",
sa.DDL(
"CREATE TABLE data_values_4_10 PARTITION OF data_values "
- "FOR VALUES FROM (4) TO (10)")
+ "FOR VALUES FROM (4) TO (10)"
+ ),
)
The two table names ``'data_values'`` and ``'data_values_4_10'`` will come
from sqlalchemy.dialects.mysql import insert
- insert_stmt = insert(my_table).values(
- id='some_existing_id',
- data='inserted value')
+ insert_stmt = insert(my_table).values(id="some_existing_id", data="inserted value")
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
[
as several :class:`_schema.Column` -specific variants::
some_table = Table(
- 'some_table', metadata_obj,
- Column('id', Integer, primary_key=True, sqlite_on_conflict_primary_key='FAIL'),
- Column('data', Integer),
- UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
+ "some_table",
+ metadata_obj,
+ Column("id", Integer, primary_key=True, sqlite_on_conflict_primary_key="FAIL"),
+ Column("data", Integer),
+ UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"),
)
The above table would render in a CREATE TABLE statement as::
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server",
- fast_executemany=True)
+ fast_executemany=True,
+ )
.. seealso::
on :class:`_schema.Column`::
test = Table(
- 'test', metadata_obj,
+ "test",
+ metadata_obj,
Column(
- 'id', Integer, primary_key=True, mssql_identity_start=100,
- mssql_identity_increment=10
+ "id",
+ Integer,
+ primary_key=True,
+ mssql_identity_start=100,
+ mssql_identity_increment=10,
),
- Column('name', String(20))
+ Column("name", String(20)),
)
In order to emit ``IDENTITY`` on a non-primary key column, which is a little-used
test = Table(
- 'test', metadata_obj,
- Column('id', Integer, primary_key=True, autoincrement=False),
- Column('number', Integer, autoincrement=True)
+ "test",
+ metadata_obj,
+ Column("id", Integer, primary_key=True, autoincrement=False),
+ Column("number", Integer, autoincrement=True),
)
.. seealso::
with Session(engine, future=True) as sess:
- stmt = select(User).where(
- User.name == 'sandy'
- ).join(User.addresses).where(Address.email_address.like("%gmail%"))
+ stmt = (
+ select(User)
+ .where(User.name == "sandy")
+ .join(User.addresses)
+ .where(Address.email_address.like("%gmail%"))
+ )
result = sess.execute(stmt)
Core :func:`_sql.update` and :func:`_sql.delete` can be used for bulk
operations. A bulk update like the following::
- session.query(User).filter(User.name == 'sandy').update({"password": "foobar"}, synchronize_session="fetch")
+ session.query(User).filter(User.name == "sandy").update(
+ {"password": "foobar"}, synchronize_session="fetch"
+ )
can now be achieved in :term:`2.0 style` (and indeed the above runs internally
in this way) as follows::
with Session(engine, future=True) as sess:
- stmt = update(User).where(
- User.name == 'sandy'
- ).values(password="foobar").execution_options(
- synchronize_session="fetch"
+ stmt = (
+ update(User)
+ .where(User.name == "sandy")
+ .values(password="foobar")
+ .execution_options(synchronize_session="fetch")
)
sess.execute(stmt)
is code such as the following::
stmt = select(users)
- stmt = stmt.where(stmt.c.name == 'foo')
+ stmt = stmt.where(stmt.c.name == "foo")
The above code appears intuitive, as though it would generate
"SELECT * FROM users WHERE name='foo'"; however, veteran SQLAlchemy users will
present in the ``users.c`` collection::
stmt = select(users)
- stmt = stmt.where(stmt.selected_columns.name == 'foo')
-
+ stmt = stmt.where(stmt.selected_columns.name == "foo")
:ticket:`4617`
:meth:`_orm.Query.join`, adding JOIN criteria to the existing statement by
matching to the left entity::
- stmt = select(user_table).join(addresses_table, user_table.c.id == addresses_table.c.user_id)
+ stmt = select(user_table).join(
+ addresses_table, user_table.c.id == addresses_table.c.user_id
+ )
producing::
To alter the contents of the :attr:`_engine.URL.query` dictionary, methods
such as :meth:`_engine.URL.update_query_dict` may be used::
- >>> url.update_query_dict({"sslcert": '/path/to/crt'})
+ >>> url.update_query_dict({"sslcert": "/path/to/crt"})
postgresql://user:***@host/dbname?sslcert=%2Fpath%2Fto%2Fcrt
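Other components of the URL are replaced with :meth:`_engine.URL.set`, which
likewise returns a new :class:`_engine.URL` object and leaves the original
unchanged::

    >>> new_url = url.set(username="scott", password="tiger")
    >>> new_url is url
    False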
To upgrade code that is mutating these fields directly, a **backwards and
some_url.drivername = some_drivername
return some_url
+
def set_ssl_cert(some_url, ssl_cert):
# check for 1.4
if hasattr(some_url, "update_query_dict"):
For example::
>>> from sqlalchemy.engine import make_url
- >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt")
+ >>> url = make_url(
+ ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt"
+ ... )
>>> url.query
immutabledict({'alt_host': ('host1', 'host2'), 'sslcert': '/path/to/crt'})
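Values are read like any mapping; a key that appeared more than once in the
URL comes back as a tuple::

    >>> url.query["sslcert"]
    '/path/to/crt'
    >>> url.query["alt_host"]
    ('host1', 'host2')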
from sqlalchemy.engine import CreateEnginePlugin
+
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
# check for 1.4 style
if hasattr(CreateEnginePlugin, "update_url"):
- self.my_argument_one = url.query['my_argument_one']
- self.my_argument_two = url.query['my_argument_two']
+ self.my_argument_one = url.query["my_argument_one"]
+ self.my_argument_two = url.query["my_argument_two"]
else:
# legacy
- self.my_argument_one = url.query.pop('my_argument_one')
- self.my_argument_two = url.query.pop('my_argument_two')
+ self.my_argument_one = url.query.pop("my_argument_one")
+ self.my_argument_two = url.query.pop("my_argument_two")
- self.my_argument_three = kwargs.pop('my_argument_three', None)
+ self.my_argument_three = kwargs.pop("my_argument_three", None)
def update_url(self, url):
# this method runs in 1.4 only and should be used to consume
# plugin-specific arguments
- return url.difference_update_query(
- ["my_argument_one", "my_argument_two"]
- )
+ return url.difference_update_query(["my_argument_one", "my_argument_two"])
See the docstring at :class:`_engine.CreateEnginePlugin` for complete details
on how this class is used.
stmt = select(users_table).where(
case(
- (users_table.c.name == 'wendy', 'W'),
- (users_table.c.name == 'jack', 'J'),
- else_='E'
+ (users_table.c.name == "wendy", "W"),
+ (users_table.c.name == "jack", "J"),
+ else_="E",
)
)
address_alias = aliased(Address)
- q = session.query(User).\
- join(address_alias, User.addresses).\
- filter(Address.email_address == 'foo')
+ q = (
+ session.query(User)
+ .join(address_alias, User.addresses)
+ .filter(Address.email_address == "foo")
+ )
The above query selects from a JOIN of ``User`` and ``address_alias``, the
latter of which is an alias of the ``Address`` entity. However, the
clause to link the new ``Address`` entity with the previous ``address_alias``
entity and that will remove the warning::
- q = session.query(User).\
- join(address_alias, User.addresses).\
- filter(Address.email_address == 'foo').\
- filter(Address.id == address_alias.id) # resolve cartesian products,
- # will no longer warn
+ q = (
+ session.query(User)
+ .join(address_alias, User.addresses)
+ .filter(Address.email_address == "foo")
+ .filter(Address.id == address_alias.id)
+ ) # resolve cartesian products,
+ # will no longer warn
The cartesian product warning considers **any** kind of link between two
FROM clauses to be a resolution, even if the end result set is still
FROM clause that is completely unexpected. If the FROM clause is referred
to explicitly elsewhere and linked to the other FROMs, no warning is emitted::
- q = session.query(User).\
- join(address_alias, User.addresses).\
- filter(Address.email_address == 'foo').\
- filter(Address.id > address_alias.id) # will generate a lot of rows,
- # but no warning
+ q = (
+ session.query(User)
+ .join(address_alias, User.addresses)
+ .filter(Address.email_address == "foo")
+ .filter(Address.id > address_alias.id)
+ ) # will generate a lot of rows,
+ # but no warning
Full cartesian products are also allowed if they are explicitly stated; if we
wanted for example the cartesian product of ``User`` and ``Address``, we can
with engine.connect() as conn:
row = conn.execute(table.select().where(table.c.id == 5)).one()
-
:meth:`_engine.Result.one_or_none` - same, but also returns None for no rows
:meth:`_engine.Result.all` - returns all rows
.. sourcecode::
with engine.connect() as conn:
- # requests x, y, z
- result = conn.execute(select(table.c.x, table.c.y, table.c.z))
+ # requests x, y, z
+ result = conn.execute(select(table.c.x, table.c.y, table.c.z))
- # iterate rows as y, x
- for y, x in result.columns("y", "x"):
- print("Y: %s X: %s" % (y, x))
+ # iterate rows as y, x
+ for y, x in result.columns("y", "x"):
+ print("Y: %s X: %s" % (y, x))
:meth:`_engine.Result.scalars` - returns lists of scalar objects, from the
first column by default but can also be selected:
.. sourcecode::
with engine.connect() as conn:
- result = conn.execute(select(table.c.x, table.c.y, table.c.z))
+ result = conn.execute(select(table.c.x, table.c.y, table.c.z))
- for map_ in result.mappings():
- print("Y: %(y)s X: %(x)s" % map_)
+ for map_ in result.mappings():
+ print("Y: %(y)s X: %(x)s" % map_)
When using Core, the object returned by :meth:`_engine.Connection.execute` is
an instance of :class:`.CursorResult`, which continues to feature the same API
The biggest cross-incompatible difference is the behavior of ``__contains__``::
- "id" in row # True for a mapping, False for a named tuple
- "some name" in row # False for a mapping, True for a named tuple
+ "id" in row # True for a mapping, False for a named tuple
+ "some name" in row # False for a mapping, True for a named tuple
In 1.4, when a :class:`.LegacyRow` is returned by a Core result set, the above
``"id" in row`` comparison will continue to succeed, however a deprecation
a datetime value from SQLite, the data for the row as present in the
:class:`.RowProxy` object would previously have looked like::
- row_proxy = (1, '2019-12-31 19:56:58.272106')
+ row_proxy = (1, "2019-12-31 19:56:58.272106")
and then upon access via ``__getitem__``, the ``datetime.strptime()`` function
would be used on the fly to convert the above string date into a ``datetime``
the 2.0 transition::
>>> from sqlalchemy import column, select
- >>> c1, c2, c3, c4 = column('c1'), column('c2'), column('c3'), column('c4')
- >>> stmt = select(c1, c2, c3.label('c2'), c2, c4)
+ >>> c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4")
+ >>> stmt = select(c1, c2, c3.label("c2"), c2, c4)
>>> print(stmt)
SELECT c1, c2, c3 AS c2, c2, c4
deduplication of implicitly generated labels::
>>> from sqlalchemy import table
- >>> user = table('user', column('id'), column('name'))
+ >>> user = table("user", column("id"), column("name"))
>>> stmt = select(user.c.id, user.c.name, user.c.id).apply_labels()
>>> print(stmt)
SELECT "user".id AS user_id, "user".name AS user_name, "user".id AS id_1
For CAST against expressions that don't have a name, the previous logic is used
to generate the usual "anonymous" labels::
- >>> print(select(cast('hi there,' + foo.c.data, String)))
+ >>> print(select(cast("hi there," + foo.c.data, String)))
SELECT CAST(:data_1 + foo.data AS VARCHAR) AS anon_1
FROM foo
expression as these don't render inside of a CAST, will nonetheless make use of
the given name::
- >>> print(select(cast(('hi there,' + foo.c.data).label('hello_data'), String)))
+ >>> print(select(cast(("hi there," + foo.c.data).label("hello_data"), String)))
SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data
FROM foo
And of course as was always the case, :class:`.Label` can be applied to the
expression on the outside to apply an "AS <name>" label directly::
- >>> print(select(cast(('hi there,' + foo.c.data), String).label('hello_data')))
+ >>> print(select(cast(("hi there," + foo.c.data), String).label("hello_data")))
SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data
FROM foo
boolean = Column(Boolean(create_constraint=True))
enum = Column(Enum("a", "b", "c", create_constraint=True))
-
:ticket:`5367`
New Features - ORM
the attribute::
class Book(Base):
- __tablename__ = 'book'
+ __tablename__ = "book"
book_id = Column(Integer, primary_key=True)
title = Column(String(200), nullable=False)
summary = deferred(Column(String(2000)), raiseload=True)
excerpt = deferred(Column(Text), raiseload=True)
+
book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first()
It was originally considered that the existing :func:`.raiseload` option that
attributes. However, this would break the "wildcard" behavior of :func:`.raiseload`,
which is documented as allowing one to prevent all relationships from loading::
- session.query(Order).options(
- joinedload(Order.items), raiseload('*'))
+ session.query(Order).options(joinedload(Order.items), raiseload("*"))
Above, if we had expanded :func:`.raiseload` to accommodate columns as
well, the wildcard would also prevent columns from loading and thus be a
row._mapping[u1] # same as row[0]
- row = (
- s.query(User.id, Address.email_address)
- .join(User.addresses)
- .first()
- )
+ row = s.query(User.id, Address.email_address).join(User.addresses).first()
row._mapping[User.id] # same as row[0]
row._mapping["id"] # same as row[0]
Session = sessionmaker(engine, future=True)
with Session() as session:
- u1 = User()
- session.add(u1)
-
- a1 = Address()
- a1.user = u1 # <--- will not add "a1" to the Session
-
+ u1 = User()
+ session.add(u1)
+ a1 = Address()
+ a1.user = u1 # <--- will not add "a1" to the Session
:ticket:`5150`
relationship, when an expired object is unexpired or an object is refreshed::
>>> a1 = session.query(A).options(joinedload(A.bs)).first()
- >>> a1.data = 'new data'
+ >>> a1.data = "new data"
>>> session.commit()
Above, the ``A`` object was loaded with a ``joinedload()`` option associated
an additional query::
>>> a1 = session.query(A).options(selectinload(A.bs)).first()
- >>> a1.data = 'new data'
+ >>> a1.data = "new data"
>>> session.commit()
>>> a1.data
SELECT a.id AS a_id, a.data AS a_data
harmful, which is when the object is merged into a session::
>>> u1 = User(id=1) # create an empty User to merge with id=1 in the database
- >>> merged1 = session.merge(u1) # value of merged1.addresses is unchanged from that of the DB
+ >>> merged1 = session.merge(
+ ... u1
+ ... ) # value of merged1.addresses is unchanged from that of the DB
- >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database
+ >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database
>>> u2.addresses
[]
>>> merged2 = session.merge(u2) # value of merged2.addresses has been emptied in the DB
>>> u1 = User()
>>> l1 = u1.addresses # new list is created, associated with the state
>>> assert u1.addresses is l1 # you get the same list each time you access it
- >>> assert "addresses" not in u1.__dict__ # but it won't go into __dict__ until it's mutated
+ >>> assert (
+ ... "addresses" not in u1.__dict__
+ ... ) # but it won't go into __dict__ until it's mutated
>>> from sqlalchemy import inspect
>>> inspect(u1).attrs.addresses.history
History(added=None, unchanged=None, deleted=None)
>>> u1.addresses
[]
# this will now fail, would pass before
- >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {"addresses": []}
+ >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {
+ ... "addresses": []
+ ... }
or to ensure that the collection won't require a lazy load to proceed, the
(admittedly awkward) code below will now also fail::
to be inserted has the same primary key as an object that is already present::
class Product(Base):
- __tablename__ = 'product'
+ __tablename__ = "product"
id = Column(Integer, primary_key=True)
+
session = Session(engine)
# add Product with primary key 1
# ...
# this is now an error
- addresses = relationship(
- "Address", viewonly=True, cascade="all, delete-orphan")
+ addresses = relationship("Address", viewonly=True, cascade="all, delete-orphan")
The above will raise::
s.commit()
- print(
- s.query(Manager).select_entity_from(s.query(Employee).subquery()).all()
- )
-
+ print(s.query(Manager).select_entity_from(s.query(Employee).subquery()).all())
The subquery selects both the ``Engineer`` and the ``Manager`` rows, and
even though the outer query is against ``Manager``, we get a non-``Manager``
integer primary key column of a table::
Table(
- "some_table", metadata,
- Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True)
+ "some_table",
+ metadata,
+ Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True),
)
The above :class:`.Sequence` is only used for DDL and INSERT statements if the
print(result.fetchall())
-
The goal of "2.0 deprecations mode" is that a program which runs with no
:class:`_exc.RemovedIn20Warning` warnings with "2.0 deprecations mode" turned
on is then ready to run in SQLAlchemy 2.0.
from sqlalchemy import exc
# for warnings not included in regex-based filter below, just log
- warnings.filterwarnings(
- "always", category=exc.RemovedIn20Warning
- )
+ warnings.filterwarnings("always", category=exc.RemovedIn20Warning)
# for warnings related to execute() / scalar(), raise
for msg in [
r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function",
- r"The current statement is being autocommitted using implicit "
- "autocommit,",
+ r"The current statement is being autocommitted using implicit " "autocommit,",
r"The connection.execute\(\) method in SQLAlchemy 2.0 will accept "
"parameters as a single dictionary or a single sequence of "
"dictionaries only.",
r"The Connection.connect\(\) function/method is considered legacy",
r".*DefaultGenerator.execute\(\)",
]:
- warnings.filterwarnings(
- "error", message=msg, category=exc.RemovedIn20Warning,
- )
+ warnings.filterwarnings(
+ "error",
+ message=msg,
+ category=exc.RemovedIn20Warning,
+ )
3. As each sub-category of warnings is resolved in the application, new
warnings that are caught by the "always" filter can be added to the list
conn.commit() # commit as you go
-
-
Migration to 2.0 Step Five - Use the ``future`` flag on Session
---------------------------------------------------------------
:class:`_orm.Session` may be used as a context manager::
from sqlalchemy.orm import Session
+
with Session(engine) as session:
session.add(MyObject())
session.commit()
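The transaction may drive the scope as well; :meth:`_orm.Session.begin`
returns a context manager that commits on success and rolls back on error, as
in this sketch::

    with Session(engine) as session, session.begin():
        session.add(MyObject())
    # commits the transaction, then closes the Session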
conn = engine.connect()
# won't autocommit in 2.0
- conn.execute(some_table.insert().values(foo='bar'))
+ conn.execute(some_table.insert().values(foo="bar"))
Nor will this autocommit::
conn = engine.connect()
# won't autocommit in 2.0
- conn.execute(
- text("EXEC my_procedural_thing()").execution_options(autocommit=True)
- )
-
+ conn.execute(text("EXEC my_procedural_thing()").execution_options(autocommit=True))
**Migration to 2.0**
or the :meth:`_engine.Engine.begin` context manager::
with engine.begin() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
- conn.execute(some_other_table.insert().values(bat='hoho'))
+ conn.execute(some_table.insert().values(foo="bar"))
+ conn.execute(some_other_table.insert().values(bat="hoho"))
with engine.connect() as conn:
with conn.begin():
- conn.execute(some_table.insert().values(foo='bar'))
- conn.execute(some_other_table.insert().values(bat='hoho'))
+ conn.execute(some_table.insert().values(foo="bar"))
+ conn.execute(some_other_table.insert().values(bat="hoho"))
with engine.begin() as conn:
conn.execute(text("EXEC my_procedural_thing()"))
:meth:`_future.Connection.begin`::
with engine.connect() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
- conn.execute(some_other_table.insert().values(bat='hoho'))
+ conn.execute(some_table.insert().values(foo="bar"))
+ conn.execute(some_other_table.insert().values(bat="hoho"))
conn.commit()
of Core use cases, it's the pattern that is already recommended::
with engine.begin() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
+ conn.execute(some_table.insert().values(foo="bar"))
For "commit as you go, or rollback instead" usage, which resembles how the
:class:`_orm.Session` is normally used today, the "future" version of
engine = create_engine(..., future=True)
with engine.connect() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
+ conn.execute(some_table.insert().values(foo="bar"))
conn.commit()
conn.execute(text("some other SQL"))
metadata_obj = MetaData(bind=engine) # no longer supported
- metadata_obj.create_all() # requires Engine or Connection
+ metadata_obj.create_all() # requires Engine or Connection
metadata_obj.reflect() # requires Engine or Connection
- t = Table('t', metadata_obj, autoload=True) # use autoload_with=engine
+ t = Table("t", metadata_obj, autoload=True) # use autoload_with=engine
result = engine.execute(t.select()) # no longer supported
metadata_obj.reflect(engine)
# reflect individual table
- t = Table('t', metadata_obj, autoload_with=engine)
+ t = Table("t", metadata_obj, autoload_with=engine)
# connection level:
metadata_obj.reflect(connection)
# reflect individual table
- t = Table('t', metadata_obj, autoload_with=connection)
+ t = Table("t", metadata_obj, autoload_with=connection)
# execute SQL statements
result = conn.execute(t.select())
-
**Discussion**
with conn.begin():
result = conn.execute(stmt)
-
execute() method more strict, execution options are more prominent
-------------------------------------------------------------------------------
# positional parameters no longer supported, only named
# unless using exec_driver_sql()
- result = connection.execute(table.insert(), ('x', 'y', 'z'))
+ result = connection.execute(table.insert(), ("x", "y", "z"))
# **kwargs no longer accepted, pass a single dictionary
result = connection.execute(table.insert(), x=10, y=5)
# multiple *args no longer accepted, pass a list
result = connection.execute(
- table.insert(),
- {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}
+ table.insert(), {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}
)
-
**Migration to 2.0**
The new :meth:`_future.Connection.execute` method now accepts a subset of the
connection = engine.connect()
from sqlalchemy import text
+
result = connection.execute(text("select * from table"))
# pass a single dictionary for single statement execution
# pass a list of dictionaries for executemany
result = connection.execute(
- table.insert(),
- [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}]
+ table.insert(), [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}]
)
-
-
**Discussion**
The use of ``*args`` and ``**kwargs`` has been removed both to remove the
row = result.first() # suppose the row is (1, 2)
- "x" in row # evaluates to False, in 1.x / future=False, this would be True
+ "x" in row # evaluates to False, in 1.x / future=False, this would be True
1 in row # evaluates to True, in 1.x / future=False, this would be False
-
**Migration to 2.0**
Application code or test suites that are testing for a particular key
stmt = select(User, Address).join(User.addresses)
for row in session.execute(stmt).mappings():
- print("the user is: %s the address is: %s" % (
- row[User],
- row[Address]
- ))
+ print("the user is: %s the address is: %s" % (row[User], row[Address]))
.. seealso::
# list emits a deprecation warning
case_clause = case(
- [
- (table.c.x == 5, "five"),
- (table.c.x == 7, "seven")
- ],
- else_="neither five nor seven"
+ [(table.c.x == 5, "five"), (table.c.x == 7, "seven")],
+ else_="neither five nor seven",
)
-
**Migration to 2.0**
Only the "generative" style of :func:`_sql.select` will be supported. The list
# case conditions passed positionally
case_clause = case(
- (table.c.x == 5, "five"),
- (table.c.x == 7, "seven"),
- else_="neither five nor seven"
+ (table.c.x == 5, "five"), (table.c.x == 7, "seven"), else_="neither five nor seven"
)
**Discussion**
Examples of "structural" vs. "data" elements are as follows::
# table columns for CREATE TABLE - structural
- table = Table("table", metadata_obj, Column('x', Integer), Column('y', Integer))
+ table = Table("table", metadata_obj, Column("x", Integer), Column("y", Integer))
# columns in a SELECT statement - structural
stmt = select(table.c.x, table.c.y)
stmt = table.delete(table.c.x > 15)
# no longer supported
- stmt = table.update(
- table.c.x < 15,
- preserve_parameter_order=True
- ).values(
+ stmt = table.update(table.c.x < 15, preserve_parameter_order=True).values(
[(table.c.y, 20), (table.c.x, table.c.y + 10)]
)
stmt = table.delete().where(table.c.x > 15)
# use generative methods, ordered_values() replaces preserve_parameter_order
- stmt = table.update().where(
- table.c.x < 15,
- ).ordered_values(
- (table.c.y, 20), (table.c.x, table.c.y + 10)
+ stmt = (
+ table.update()
+ .where(
+ table.c.x < 15,
+ )
+ .ordered_values((table.c.y, 20), (table.c.x, table.c.y + 10))
)
**Discussion**
from sqlalchemy.orm import mapper
- mapper(SomeClass, some_table, properties={
- "related": relationship(SomeRelatedClass)
- })
+ mapper(SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)})
To work from a central :class:`_orm.registry` object::
mapper_reg = registry()
- mapper_reg.map_imperatively(SomeClass, some_table, properties={
- "related": relationship(SomeRelatedClass)
- })
+ mapper_reg.map_imperatively(
+ SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)}
+ )
The above :class:`_orm.registry` is also the source for declarative mappings,
and classical mappings now have access to this registry including string-based
Base = mapper_reg.generate_base()
+
class SomeRelatedClass(Base):
- __tablename__ = 'related'
+ __tablename__ = "related"
# ...
- mapper_reg.map_imperatively(SomeClass, some_table, properties={
- "related": relationship(
- "SomeRelatedClass",
- primaryjoin="SomeRelatedClass.related_id == SomeClass.id"
- )
- })
-
+ mapper_reg.map_imperatively(
+ SomeClass,
+ some_table,
+ properties={
+ "related": relationship(
+ "SomeRelatedClass",
+ primaryjoin="SomeRelatedClass.related_id == SomeClass.id",
+ )
+ },
+ )
**Discussion**
- ::
- session.execute(
- select(User)
- ).scalars().all()
+ session.execute(select(User)).scalars().all()
# or
session.scalars(select(User)).all()
* - ::
- session.query(User).\
- filter_by(name='some user').one()
+ session.query(User).filter_by(name="some user").one()
- ::
- session.execute(
- select(User).
- filter_by(name="some user")
- ).scalar_one()
+ session.execute(select(User).filter_by(name="some user")).scalar_one()
- :ref:`migration_20_unify_select`
* - ::
- session.query(User).\
- filter_by(name='some user').first()
-
+ session.query(User).filter_by(name="some user").first()
- ::
- session.scalars(
- select(User).
- filter_by(name="some user").
- limit(1)
- ).first()
+ session.scalars(select(User).filter_by(name="some user").limit(1)).first()
- :ref:`migration_20_unify_select`
* - ::
- session.query(User).options(
- joinedload(User.addresses)
- ).all()
+ session.query(User).options(joinedload(User.addresses)).all()
- ::
- session.scalars(
- select(User).
- options(
- joinedload(User.addresses)
- )
- ).unique().all()
+ session.scalars(select(User).options(joinedload(User.addresses))).unique().all()
- :ref:`joinedload_not_uniqued`
* - ::
- session.query(User).\
- join(Address).\
- filter(Address.email == 'e@sa.us').\
- all()
+ session.query(User).join(Address).filter(Address.email == "e@sa.us").all()
- ::
session.execute(
- select(User).
- join(Address).
- where(Address.email == 'e@sa.us')
+ select(User).join(Address).where(Address.email == "e@sa.us")
).scalars().all()
- :ref:`migration_20_unify_select`
* - ::
- session.query(User).from_statement(
- text("select * from users")
- ).all()
+ session.query(User).from_statement(text("select * from users")).all()
- ::
- session.scalars(
- select(User).
- from_statement(
- text("select * from users")
- )
- ).all()
+ session.scalars(select(User).from_statement(text("select * from users"))).all()
- :ref:`orm_queryguide_selecting_text`
* - ::
- session.query(User).\
- join(User.addresses).\
- options(
- contains_eager(User.addresses)
- ).\
- populate_existing().all()
+ session.query(User).join(User.addresses).options(
+ contains_eager(User.addresses)
+ ).populate_existing().all()
- ::
session.execute(
- select(User).
- join(User.addresses).
- options(contains_eager(User.addresses)).
- execution_options(populate_existing=True)
+ select(User)
+ .join(User.addresses)
+ .options(contains_eager(User.addresses))
+ .execution_options(populate_existing=True)
).scalars().all()
-
*
- ::
- session.query(User).\
- filter(User.name == 'foo').\
- update(
- {"fullname": "Foo Bar"},
- synchronize_session="evaluate"
- )
-
+ session.query(User).filter(User.name == "foo").update(
+ {"fullname": "Foo Bar"}, synchronize_session="evaluate"
+ )
- ::
session.execute(
- update(User).
- where(User.name == 'foo').
- values(fullname="Foo Bar").
- execution_options(synchronize_session="evaluate")
+ update(User)
+ .where(User.name == "foo")
+ .values(fullname="Foo Bar")
+ .execution_options(synchronize_session="evaluate")
)
- :ref:`orm_expression_update_delete`
# string use removed
q = session.query(Address).filter(with_parent(u1, "addresses"))
-
**Migration to 2.0**
Modern SQLAlchemy 1.x versions support the recommended technique which
# chaining removed
q = session.query(User).join("orders", "items", "keywords")
-
**Migration to 2.0**
Use individual calls to :meth:`_orm.Query.join` for 1.x / 2.0 cross-compatible
n1 = aliased(Node)
n2 = aliased(Node)
- q = select(Node).join(Node.children.of_type(n1)).\
- where(n1.name == "some sub child").\
- join(n1.children.of_type(n2)).\
- where(n2.name == "some sub child")
-
+ q = (
+ select(Node)
+ .join(Node.children.of_type(n1))
+ .where(n1.name == "some sub child")
+ .join(n1.children.of_type(n2))
+ .where(n2.name == "some sub child")
+ )
**Discussion**
# 1.xx code
- result = session.query(User).join(User.addresses).\
- distinct().order_by(Address.email_address).all()
+ result = (
+ session.query(User)
+ .join(User.addresses)
+ .distinct()
+ .order_by(Address.email_address)
+ .all()
+ )
In version 2.0, the "email_address" column will not be automatically added
to the columns clause, and the above query will fail, since relational
# 1.4 / 2.0 code
- stmt = select(User, Address.email_address).join(User.addresses).\
- distinct().order_by(Address.email_address)
+ stmt = (
+ select(User, Address.email_address)
+ .join(User.addresses)
+ .distinct()
+ .order_by(Address.email_address)
+ )
result = session.execute(stmt).columns(User).all()
The :meth:`_orm.Query.from_self` method will be removed from :class:`_orm.Query`::
# from_self is removed
- q = session.query(User, Address.email_address).\
- join(User.addresses).\
- from_self(User).order_by(Address.email_address)
-
+ q = (
+ session.query(User, Address.email_address)
+ .join(User.addresses)
+ .from_self(User)
+ .order_by(Address.email_address)
+ )
**Migration to 2.0**
from sqlalchemy.orm import aliased
- subq = session.query(User, Address.email_address).\
- join(User.addresses).subquery()
+ subq = session.query(User, Address.email_address).join(User.addresses).subquery()
ua = aliased(User, subq)
from sqlalchemy.orm import aliased
- subq = select(User, Address.email_address).\
- join(User.addresses).subquery()
+ subq = select(User, Address.email_address).join(User.addresses).subquery()
ua = aliased(User, subq)
result = session.execute(stmt)
-
**Discussion**
The :meth:`_query.Query.from_self` method is a very complicated method that is rarely
# 1.4 / 2.0 code
- subq = select(User, Address).\
- join(User.addresses).subquery()
+ subq = select(User, Address).join(User.addresses).subquery()
ua = aliased(User, subq)
aa = aliased(Address, subq)
# In the new API, uniquing is available but not implicitly
# enabled
- result = session.execute(
- select(User).options(joinedload(User.addresses))
- )
+ result = session.execute(select(User).options(joinedload(User.addresses)))
# this actually will raise an error to let the user know that
# uniquing should be applied
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
posts = relationship(Post, lazy="dynamic")
+
jack = session.get(User, 5)
# filter Jack's blog posts
- posts = session.scalars(
- jack.posts.statement.where(Post.headline == "this is a post")
- )
+ posts = session.scalars(jack.posts.statement.where(Post.headline == "this is a post"))
* Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select`
construct directly::
jack = session.get(User, 5)
posts = session.scalars(
- select(Post).
- where(with_parent(jack, User.posts)).
- where(Post.headline == "this is a post")
+ select(Post)
+ .where(with_parent(jack, User.posts))
+ .where(Post.headline == "this is a post")
)
**Discussion**
# commits, won't be supported
sess.flush()
-
**Migration to 2.0**
The main reason a :class:`_orm.Session` is used in "autocommit" mode
sess = Session(engine)
sess.begin() # begin explicitly; if not called, will autobegin
- # when database access is needed
+ # when database access is needed
sess.add(obj)
import contextlib
+
@contextlib.contextmanager
def transaction(session):
if not session.in_transaction():
else:
yield
-
The above context manager may be used in the same way the
"subtransaction" flag works, such as in the following example::
with transaction(session):
method_b(session)
+
# method_b also starts a transaction, but when
# called from method_a participates in the ongoing
# transaction.
def method_b(session):
with transaction(session):
- session.add(SomeObject('bat', 'lala'))
+ session.add(SomeObject("bat", "lala"))
+
Session = sessionmaker(engine)
def method_a(session):
method_b(session)
+
def method_b(session):
- session.add(SomeObject('bat', 'lala'))
+ session.add(SomeObject("bat", "lala"))
+
Session = sessionmaker(engine)
Database via Azure Active Directory", which apparently lacks the
``system_views`` view entirely. Error catching has been extended so that under
no circumstances will this method ever fail, provided database connectivity
- is present.
\ No newline at end of file
+ is present.
combinations of SQL label names and aliasing. This "wrapping" is not
appropriate for :func:`_orm.contains_eager` which has always had the
contract that the user-defined SQL statement is unmodified with the
- exception of adding the appropriate columns to be fetched.
\ No newline at end of file
+ exception of adding the appropriate columns to be fetched.
Recall from :doc:`/core/engines` that an :class:`_engine.Engine` is created via
the :func:`_sa.create_engine` call::
- engine = create_engine('mysql://scott:tiger@localhost/test')
+ engine = create_engine("mysql://scott:tiger@localhost/test")
The typical usage of :func:`_sa.create_engine` is once per particular database
URL, held globally for the lifetime of a single application process. A single
with engine.connect() as connection:
result = connection.execute(text("select username from users"))
for row in result:
- print("username:", row['username'])
+ print("username:", row["username"])
Above, the :meth:`_engine.Engine.connect` method returns a :class:`_engine.Connection`
object, and by using it in a Python context manager (e.g. the ``with:``
with connection.begin(): # open a transaction
method_b(connection)
+
# method_b also starts a transaction
def method_b(connection):
- with connection.begin(): # open a transaction - this runs in the
- # context of method_a's transaction
+ with connection.begin(): # open a transaction - this runs in the
+ # context of method_a's transaction
connection.execute(text("insert into mytable values ('bat', 'lala')"))
connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"})
+
# open a Connection and call method_a
with engine.connect() as conn:
method_a(conn)
def method_a(connection):
method_b(connection)
+
# method_b uses the connection and assumes the transaction
# is external
def method_b(connection):
connection.execute(text("insert into mytable values ('bat', 'lala')"))
connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"})
+
# open a Connection inside of a transaction and call method_a
with engine.begin() as conn:
method_a(conn)
import contextlib
+
@contextlib.contextmanager
def transaction(connection):
if not connection.in_transaction():
    with connection.begin():
        yield
else:
    yield
with transaction(connection): # open a transaction
method_b(connection)
+
# method_b either starts a transaction, or uses the one already
# present
def method_b(connection):
connection.execute(text("insert into mytable values ('bat', 'lala')"))
connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"})
+
# open a Connection and call method_a
with engine.connect() as conn:
method_a(conn)
import contextlib
+
def connectivity(engine):
connection = None
with connectivity():
method_b(connectivity)
+
# method_b also wants to use a connection from the context, so it
# also calls "with:", but also it actually uses the connection.
def method_b(connectivity):
connection.execute(text("insert into mytable values ('bat', 'lala')"))
connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"})
+
# create a new connection/transaction context object and call
# method_a
method_a(connectivity(engine))
eng = create_engine(
"postgresql://scott:tiger@localhost/test",
- execution_options={
- "isolation_level": "REPEATABLE READ"
- }
+ execution_options={"isolation_level": "REPEATABLE READ"},
)
With the above setting, the DBAPI connection will be set to use a
autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT")
-
Above, the :meth:`_engine.Engine.execution_options` method creates a shallow
copy of the original :class:`_engine.Engine`. Both ``eng`` and
``autocommit_engine`` share the same dialect and connection pool. However, the
These three behaviors are illustrated in the example below::
with engine.connect() as conn:
- result = (
- conn.
- execution_options(yield_per=100).
- execute(text("select * from table"))
- )
+ result = conn.execution_options(yield_per=100).execute(text("select * from table"))
for partition in result.partitions():
# partition is an iterable that will be at most 100 items
for row in partition:
    print(f"{row}")
result = engine.execute(text("select username from users"))
for row in result:
- print("username:", row['username'])
+ print("username:", row["username"])
In addition to "connectionless" execution, it is also possible
to use the :meth:`~.Executable.execute` method of
from sqlalchemy import MetaData, Table, Column, Integer, String
metadata_obj = MetaData()
- users_table = Table('users', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50))
+ users_table = Table(
+ "users",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
)
Explicit execution delivers the SQL text or constructed SQL expression to the
Given a table::
user_table = Table(
- 'user', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50))
+ "user",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
)
The "schema" of this :class:`_schema.Table` as defined by the
render the schema as ``user_schema_one``::
connection = engine.connect().execution_options(
- schema_translate_map={None: "user_schema_one"})
+ schema_translate_map={None: "user_schema_one"}
+ )
result = connection.execute(user_table.select())
connection = engine.connect().execution_options(
schema_translate_map={
- None: "user_schema_one", # no schema name -> "user_schema_one"
- "special": "special_schema", # schema="special" becomes "special_schema"
- "public": None # Table objects with schema="public" will render with no schema
- })
+ None: "user_schema_one", # no schema name -> "user_schema_one"
+ "special": "special_schema", # schema="special" becomes "special_schema"
+ "public": None, # Table objects with schema="public" will render with no schema
+ }
+ )
The :paramref:`.Connection.execution_options.schema_translate_map` parameter
affects all DDL and SQL constructs generated from the SQL expression language,
to the :class:`_orm.Session`. The :class:`_orm.Session` uses a new
:class:`_engine.Connection` for each transaction::
- schema_engine = engine.execution_options(schema_translate_map = { ... } )
+ schema_engine = engine.execution_options(schema_translate_map={...})
session = Session(schema_engine)
s = Session(e)
- s.add_all(
- [A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])]
- )
+ s.add_all([A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])])
s.commit()
for a_rec in s.query(A):
from sqlalchemy.engine.default import DefaultDialect
+
class MyDialect(DefaultDialect):
supports_statement_cache = True
def limit_clause(self, select, **kw):
text = ""
if select._limit is not None:
- text += " \n LIMIT %d" % (select._limit, )
+ text += " \n LIMIT %d" % (select._limit,)
if select._offset is not None:
- text += " \n OFFSET %d" % (select._offset, )
+ text += " \n OFFSET %d" % (select._offset,)
return text
The above routine renders the :attr:`.Select._limit` and
from sqlalchemy import lambda_stmt
+
def run_my_statement(connection, parameter):
stmt = lambda_stmt(lambda: select(table))
stmt += lambda s: s.where(table.c.col == parameter)
return connection.execute(stmt)
+
with engine.connect() as conn:
result = run_my_statement(conn, "some parameter")
def upd(id_, newname):
stmt = lambda_stmt(lambda: users.update())
stmt += lambda s: s.values(name=newname)
- stmt += lambda s: s.where(users.c.id==id_)
+ stmt += lambda s: s.where(users.c.id == id_)
return stmt
+
with engine.begin() as conn:
conn.execute(upd(7, "foo"))
>>> def my_stmt(x, y):
... stmt = lambda_stmt(lambda: select(func.max(x, y)))
... return stmt
- ...
>>> engine = create_engine("sqlite://", echo=True)
>>> with engine.connect() as conn:
... print(conn.scalar(my_stmt(5, 10)))
... print(conn.scalar(my_stmt(12, 8)))
- ...
{opensql}SELECT max(?, ?) AS max_1
[generated in 0.00057s] (5, 10){stop}
10
>>> def my_stmt(x, y):
... def get_x():
... return x
+ ...
... def get_y():
... return y
...
... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y())))
... return stmt
- ...
>>> with engine.connect() as conn:
... print(conn.scalar(my_stmt(5, 10)))
- ...
Traceback (most recent call last):
# ...
sqlalchemy.exc.InvalidRequestError: Can't invoke Python callable get_x()
>>> def my_stmt(x, y):
... def get_x():
... return x
+ ...
... def get_y():
... return y
...
... def __init__(self, x, y):
... self.x = x
... self.y = y
- ...
>>> def my_stmt(foo):
... stmt = lambda_stmt(lambda: select(func.max(foo.x, foo.y)))
... return stmt
- ...
>>> with engine.connect() as conn:
- ... print(conn.scalar(my_stmt(Foo(5, 10))))
- ...
+ ... print(conn.scalar(my_stmt(Foo(5, 10))))
Traceback (most recent call last):
# ...
sqlalchemy.exc.InvalidRequestError: Closure variable named 'foo' inside of
>>> def my_stmt(foo):
... stmt = lambda_stmt(
- ... lambda: select(func.max(foo.x, foo.y)),
- ... track_closure_variables=False
+ ... lambda: select(func.max(foo.x, foo.y)), track_closure_variables=False
... )
... return stmt
>>> def my_stmt(self, foo):
... stmt = lambda_stmt(
- ... lambda: select(*self.column_expressions),
- ... track_closure_variables=False
- ... )
- ... stmt = stmt.add_criteria(
- ... lambda: self.where_criteria,
- ... track_on=[self]
+ ... lambda: select(*self.column_expressions), track_closure_variables=False
... )
+ ... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self])
... return stmt
Using ``track_on`` means the given objects will be stored long term in the
state within the construct::
>>> from sqlalchemy import select, column
- >>> stmt = select(column('q'))
+ >>> stmt = select(column("q"))
>>> cache_key = stmt._generate_cache_key()
>>> print(cache_key) # somewhat paraphrased
CacheKey(key=(
with engine.connect() as conn:
conn.exec_driver_sql("SET param='bar'")
-
.. versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method.
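Driver-level parameters pass through to the DBAPI unmodified as well; as an
illustrative sketch (not taken from the original page), assuming a DBAPI that
uses the ``qmark`` paramstyle such as ``sqlite3``::

    with engine.connect() as conn:
        result = conn.exec_driver_sql("SELECT * FROM some_table WHERE id = ?", (5,))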
.. _dbapi_connections_cursor:
connection = engine.raw_connection()
try:
cursor_obj = connection.cursor()
- cursor_obj.callproc("my_procedure", ['x', 'y', 'z'])
+ cursor_obj.callproc("my_procedure", ["x", "y", "z"])
results = list(cursor_obj.fetchall())
cursor_obj.close()
connection.commit()
finally:
connection.close()
-
-
Registering New Dialects
========================
via ``foodialect.dialect``.
3. The entry point can be established in setup.py as follows::
- entry_points="""
+ entry_points = """
[sqlalchemy.dialects]
foodialect = foodialect.dialect:FooDialect
"""
including a database-qualification. For example, if ``FooDialect``
were in fact a MySQL dialect, the entry point could be established like this::
- entry_points="""
+ entry_points = """
[sqlalchemy.dialects]
mysql.foodialect = foodialect.dialect:FooDialect
"""
the need for separate installation. Use the ``register()`` function as follows::
from sqlalchemy.dialects import registry
+
registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect")
The above will respond to ``create_engine("mysql+foodialect://")`` and load the
is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object
as an argument to a :class:`~sqlalchemy.schema.Column` object::
- user_preference = Table('user_preference', metadata_obj,
- Column('pref_id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
- Column('pref_name', String(40), nullable=False),
- Column('pref_value', String(100))
+ user_preference = Table(
+ "user_preference",
+ metadata_obj,
+ Column("pref_id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False),
+ Column("pref_name", String(40), nullable=False),
+ Column("pref_value", String(100)),
)
Above, we define a new table ``user_preference`` for which each row must
has a composite primary key. Below we define a table ``invoice`` which has a
composite primary key::
- invoice = Table('invoice', metadata_obj,
- Column('invoice_id', Integer, primary_key=True),
- Column('ref_num', Integer, primary_key=True),
- Column('description', String(60), nullable=False)
+ invoice = Table(
+ "invoice",
+ metadata_obj,
+ Column("invoice_id", Integer, primary_key=True),
+ Column("ref_num", Integer, primary_key=True),
+ Column("description", String(60), nullable=False),
)
And then a table ``invoice_item`` with a composite foreign key referencing
``invoice``::
- invoice_item = Table('invoice_item', metadata_obj,
- Column('item_id', Integer, primary_key=True),
- Column('item_name', String(60), nullable=False),
- Column('invoice_id', Integer, nullable=False),
- Column('ref_num', Integer, nullable=False),
- ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num'])
+ invoice_item = Table(
+ "invoice_item",
+ metadata_obj,
+ Column("item_id", Integer, primary_key=True),
+ Column("item_name", String(60), nullable=False),
+ Column("invoice_id", Integer, nullable=False),
+ Column("ref_num", Integer, nullable=False),
+ ForeignKeyConstraint(
+ ["invoice_id", "ref_num"], ["invoice.invoice_id", "invoice.ref_num"]
+ ),
)
It's important to note that the
most forms of ALTER. Given a schema like::
node = Table(
- 'node', metadata_obj,
- Column('node_id', Integer, primary_key=True),
- Column(
- 'primary_element', Integer,
- ForeignKey('element.element_id')
- )
+ "node",
+ metadata_obj,
+ Column("node_id", Integer, primary_key=True),
+ Column("primary_element", Integer, ForeignKey("element.element_id")),
)
element = Table(
- 'element', metadata_obj,
- Column('element_id', Integer, primary_key=True),
- Column('parent_node_id', Integer),
+ "element",
+ metadata_obj,
+ Column("element_id", Integer, primary_key=True),
+ Column("parent_node_id", Integer),
ForeignKeyConstraint(
- ['parent_node_id'], ['node.node_id'],
- name='fk_element_parent_node_id'
- )
+ ["parent_node_id"], ["node.node_id"], name="fk_element_parent_node_id"
+ ),
)
When we call upon :meth:`_schema.MetaData.create_all` on a backend such as the
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... metadata_obj.create_all(conn, checkfirst=False)
+ ... metadata_obj.create_all(conn, checkfirst=False)
{opensql}CREATE TABLE element (
element_id SERIAL NOT NULL,
parent_node_id INTEGER,
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... metadata_obj.drop_all(conn, checkfirst=False)
+ ... metadata_obj.drop_all(conn, checkfirst=False)
{opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id
DROP TABLE node
DROP TABLE element
the ``'element'`` table as follows::
element = Table(
- 'element', metadata_obj,
- Column('element_id', Integer, primary_key=True),
- Column('parent_node_id', Integer),
+ "element",
+ metadata_obj,
+ Column("element_id", Integer, primary_key=True),
+ Column("parent_node_id", Integer),
ForeignKeyConstraint(
- ['parent_node_id'], ['node.node_id'],
- use_alter=True, name='fk_element_parent_node_id'
- )
+ ["parent_node_id"],
+ ["node.node_id"],
+ use_alter=True,
+ name="fk_element_parent_node_id",
+ ),
)
in our CREATE DDL we will see the ALTER statement only for this constraint,
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... metadata_obj.create_all(conn, checkfirst=False)
+ ... metadata_obj.create_all(conn, checkfirst=False)
{opensql}CREATE TABLE element (
element_id SERIAL NOT NULL,
parent_node_id INTEGER,
arguments. The value is any string which will be output after the appropriate
"ON UPDATE" or "ON DELETE" phrase::
- child = Table('child', metadata_obj,
- Column('id', Integer,
- ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
- primary_key=True
- )
- )
-
- composite = Table('composite', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('rev_id', Integer),
- Column('note_id', Integer),
+ child = Table(
+ "child",
+ metadata_obj,
+ Column(
+ "id",
+ Integer,
+ ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"),
+ primary_key=True,
+ ),
+ )
+
+ composite = Table(
+ "composite",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("rev_id", Integer),
+ Column("note_id", Integer),
ForeignKeyConstraint(
- ['rev_id', 'note_id'],
- ['revisions.id', 'revisions.note_id'],
- onupdate="CASCADE", ondelete="SET NULL"
- )
+ ["rev_id", "note_id"],
+ ["revisions.id", "revisions.note_id"],
+ onupdate="CASCADE",
+ ondelete="SET NULL",
+ ),
)
Note that these clauses require ``InnoDB`` tables when used with MySQL.
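As an illustrative sketch, the storage engine can be requested on the table
itself via the documented ``mysql_engine`` table keyword, so the ``child``
table above could be written as::

    child = Table(
        "child",
        metadata_obj,
        Column(
            "id",
            Integer,
            ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"),
            primary_key=True,
        ),
        mysql_engine="InnoDB",
    )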
from sqlalchemy import UniqueConstraint
metadata_obj = MetaData()
- mytable = Table('mytable', metadata_obj,
-
+ mytable = Table(
+ "mytable",
+ metadata_obj,
# per-column anonymous unique constraint
- Column('col1', Integer, unique=True),
-
- Column('col2', Integer),
- Column('col3', Integer),
-
+ Column("col1", Integer, unique=True),
+ Column("col2", Integer),
+ Column("col3", Integer),
# explicit/composite unique constraint. 'name' is optional.
- UniqueConstraint('col2', 'col3', name='uix_1')
- )
+ UniqueConstraint("col2", "col3", name="uix_1"),
+ )
CHECK Constraint
----------------
from sqlalchemy import CheckConstraint
metadata_obj = MetaData()
- mytable = Table('mytable', metadata_obj,
-
+ mytable = Table(
+ "mytable",
+ metadata_obj,
# per-column CHECK constraint
- Column('col1', Integer, CheckConstraint('col1>5')),
-
- Column('col2', Integer),
- Column('col3', Integer),
-
+ Column("col1", Integer, CheckConstraint("col1>5")),
+ Column("col2", Integer),
+ Column("col3", Integer),
# table level CHECK constraint. 'name' is optional.
- CheckConstraint('col2 > col3 + 5', name='check1')
- )
+ CheckConstraint("col2 > col3 + 5", name="check1"),
+ )
{sql}mytable.create(engine)
CREATE TABLE mytable (
from sqlalchemy import PrimaryKeyConstraint
- my_table = Table('mytable', metadata_obj,
- Column('id', Integer),
- Column('version_id', Integer),
- Column('data', String(50)),
- PrimaryKeyConstraint('id', 'version_id', name='mytable_pk')
- )
+ my_table = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer),
+ Column("version_id", Integer),
+ Column("data", String(50)),
+ PrimaryKeyConstraint("id", "version_id", name="mytable_pk"),
+ )
.. seealso::
An example naming convention that suits basic cases is as follows::
convention = {
- "ix": 'ix_%(column_0_label)s',
- "uq": "uq_%(table_name)s_%(column_0_name)s",
- "ck": "ck_%(table_name)s_%(constraint_name)s",
- "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
- "pk": "pk_%(table_name)s"
+ "ix": "ix_%(column_0_label)s",
+ "uq": "uq_%(table_name)s_%(column_0_name)s",
+ "ck": "ck_%(table_name)s_%(constraint_name)s",
+ "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
+ "pk": "pk_%(table_name)s",
}
metadata_obj = MetaData(naming_convention=convention)
For example, we can observe the name produced when we create an unnamed
:class:`.UniqueConstraint`::
- >>> user_table = Table('user', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30), nullable=False),
- ... UniqueConstraint('name')
+ >>> user_table = Table(
+ ... "user",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30), nullable=False),
+ ... UniqueConstraint("name"),
... )
>>> list(user_table.constraints)[1].name
'uq_user_name'
This same feature takes effect even if we just use the :paramref:`_schema.Column.unique`
flag::
- >>> user_table = Table('user', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30), nullable=False, unique=True)
- ... )
+ >>> user_table = Table(
+ ... "user",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30), nullable=False, unique=True),
+ ... )
>>> list(user_table.constraints)[1].name
'uq_user_name'
hash of the long name. For example, the naming convention below will
generate very long names given the column names in use::
- metadata_obj = MetaData(naming_convention={
- "uq": "uq_%(table_name)s_%(column_0_N_name)s"
- })
+ metadata_obj = MetaData(
+ naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"}
+ )
long_names = Table(
- 'long_names', metadata_obj,
- Column('information_channel_code', Integer, key='a'),
- Column('billing_convention_name', Integer, key='b'),
- Column('product_identifier', Integer, key='c'),
- UniqueConstraint('a', 'b', 'c')
+ "long_names",
+ metadata_obj,
+ Column("information_channel_code", Integer, key="a"),
+ Column("billing_convention_name", Integer, key="b"),
+ Column("product_identifier", Integer, key="c"),
+ UniqueConstraint("a", "b", "c"),
)
On the PostgreSQL dialect, names longer than 63 characters will be truncated
import uuid
+
def fk_guid(constraint, table):
- str_tokens = [
- table.name,
- ] + [
- element.parent.name for element in constraint.elements
- ] + [
- element.target_fullname for element in constraint.elements
- ]
- guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii'))
+ str_tokens = (
+     [table.name]
+     + [element.parent.name for element in constraint.elements]
+     + [element.target_fullname for element in constraint.elements]
+ )
+ # uuid5() takes the name as a text string under Python 3
+ guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens))
return str(guid)
+
convention = {
"fk_guid": fk_guid,
- "ix": 'ix_%(column_0_label)s',
+ "ix": "ix_%(column_0_label)s",
"fk": "fk_%(fk_guid)s",
}
>>> metadata_obj = MetaData(naming_convention=convention)
- >>> user_table = Table('user', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('version', Integer, primary_key=True),
- ... Column('data', String(30))
- ... )
- >>> address_table = Table('address', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', Integer),
- ... Column('user_version_id', Integer)
- ... )
- >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'],
- ... ['user.id', 'user.version'])
+ >>> user_table = Table(
+ ... "user",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("version", Integer, primary_key=True),
+ ... Column("data", String(30)),
+ ... )
+ >>> address_table = Table(
+ ... "address",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", Integer),
+ ... Column("user_version_id", Integer),
+ ... )
+ >>> fk = ForeignKeyConstraint(["user_id", "user_version_id"], ["user.id", "user.version"])
>>> address_table.append_constraint(fk)
>>> fk.name
'fk_0cd51ab5-8d70-56e8-a83c-86661737766d'
naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
)
- Table('foo', metadata_obj,
- Column('value', Integer),
- CheckConstraint('value > 5', name='value_gt_5')
+ Table(
+ "foo",
+ metadata_obj,
+ Column("value", Integer),
+ CheckConstraint("value > 5", name="value_gt_5"),
)
The above table will produce the name ``ck_foo_value_gt_5``::
:func:`_expression.column` element within the constraint's expression,
either by declaring the constraint separate from the table::
- metadata_obj = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- foo = Table('foo', metadata_obj,
- Column('value', Integer)
- )
+ foo = Table("foo", metadata_obj, Column("value", Integer))
CheckConstraint(foo.c.value > 5)
from sqlalchemy import column
- metadata_obj = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- foo = Table('foo', metadata_obj,
- Column('value', Integer),
- CheckConstraint(column('value') > 5)
+ foo = Table(
+ "foo", metadata_obj, Column("value", Integer), CheckConstraint(column("value") > 5)
)
Both will produce the name ``ck_foo_value``::
The name for the constraint here is most directly set up by sending
the "name" parameter, e.g. :paramref:`.Boolean.name`::
- Table('foo', metadata_obj,
- Column('flag', Boolean(name='ck_foo_flag'))
- )
+ Table("foo", metadata_obj, Column("flag", Boolean(name="ck_foo_flag")))
The naming convention feature may be combined with these types as well,
normally by using a convention which includes ``%(constraint_name)s``
naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
)
- Table('foo', metadata_obj,
- Column('flag', Boolean(name='flag_bool'))
- )
+ Table("foo", metadata_obj, Column("flag", Boolean(name="flag_bool")))
The above table will produce the constraint name ``ck_foo_flag_bool``::
which works nicely with :class:`.SchemaType` since these constraints have
only one column::
- metadata_obj = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- Table('foo', metadata_obj,
- Column('flag', Boolean())
- )
+ Table("foo", metadata_obj, Column("flag", Boolean()))
The above schema will produce::
.. sourcecode:: python+sql
metadata_obj = MetaData()
- mytable = Table('mytable', metadata_obj,
+ mytable = Table(
+ "mytable",
+ metadata_obj,
# an indexed column, with index "ix_mytable_col1"
- Column('col1', Integer, index=True),
-
+ Column("col1", Integer, index=True),
# a uniquely indexed column with index "ix_mytable_col2"
- Column('col2', Integer, index=True, unique=True),
-
- Column('col3', Integer),
- Column('col4', Integer),
-
- Column('col5', Integer),
- Column('col6', Integer),
- )
+ Column("col2", Integer, index=True, unique=True),
+ Column("col3", Integer),
+ Column("col4", Integer),
+ Column("col5", Integer),
+ Column("col6", Integer),
+ )
# place an index on col3, col4
- Index('idx_col34', mytable.c.col3, mytable.c.col4)
+ Index("idx_col34", mytable.c.col3, mytable.c.col4)
# place a unique index on col5, col6
- Index('myindex', mytable.c.col5, mytable.c.col6, unique=True)
+ Index("myindex", mytable.c.col5, mytable.c.col6, unique=True)
{sql}mytable.create(engine)
CREATE TABLE mytable (
identify columns::
metadata_obj = MetaData()
- mytable = Table('mytable', metadata_obj,
- Column('col1', Integer),
-
- Column('col2', Integer),
-
- Column('col3', Integer),
- Column('col4', Integer),
-
+ mytable = Table(
+ "mytable",
+ metadata_obj,
+ Column("col1", Integer),
+ Column("col2", Integer),
+ Column("col3", Integer),
+ Column("col4", Integer),
# place an index on col1, col2
- Index('idx_col12', 'col1', 'col2'),
-
+ Index("idx_col12", "col1", "col2"),
# place a unique index on col3, col4
- Index('idx_col34', 'col3', 'col4', unique=True)
+ Index("idx_col34", "col3", "col4", unique=True),
)
The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method:
.. sourcecode:: python+sql
- i = Index('someindex', mytable.c.col5)
+ i = Index("someindex", mytable.c.col5)
{sql}i.create(engine)
CREATE INDEX someindex ON mytable (col5){stop}
from sqlalchemy import Index
- Index('someindex', mytable.c.somecol.desc())
+ Index("someindex", mytable.c.somecol.desc())
Or with a backend that supports functional indexes such as PostgreSQL,
a "case insensitive" index can be created using the ``lower()`` function::
from sqlalchemy import func, Index
- Index('someindex', func.lower(mytable.c.somecol))
+ Index("someindex", func.lower(mytable.c.somecol))
Index API
---------
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import BINARY
+
@compiles(BINARY, "sqlite")
def compile_binary_sqlite(type_, compiler, **kw):
return "BLOB"
from sqlalchemy.types import TypeDecorator, Unicode
+
class CoerceUTF8(TypeDecorator):
"""Safely coerce Python bytestrings to Unicode
before passing off to the database."""
def process_bind_param(self, value, dialect):
if isinstance(value, bytes):
- value = value.decode('utf-8')
+ value = value.decode("utf-8")
return value
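A hypothetical usage sketch, assuming the recipe's full form with
``impl = Unicode`` declared::

    some_table = Table("some_table", metadata_obj, Column("note", CoerceUTF8()))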
Rounding Numerics
from sqlalchemy.types import TypeDecorator, Numeric
from decimal import Decimal
+
class SafeNumeric(TypeDecorator):
"""Adds quantization to Numeric."""
def __init__(self, *arg, **kw):
TypeDecorator.__init__(self, *arg, **kw)
- self.quantize_int = - self.impl.scale
+ self.quantize_int = -self.impl.scale
self.quantize = Decimal(10) ** self.quantize_int
def process_bind_param(self, value, dialect):
- if isinstance(value, Decimal) and \
- value.as_tuple()[2] < self.quantize_int:
+ if isinstance(value, Decimal) and value.as_tuple()[2] < self.quantize_int:
value = value.quantize(self.quantize)
return value
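Likewise a hypothetical usage sketch, assuming the recipe declares
``impl = Numeric``; the two positional arguments pass through to ``Numeric``::

    amounts = Table("amounts", metadata_obj, Column("amount", SafeNumeric(12, 2)))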
import datetime
+
class TZDateTime(TypeDecorator):
impl = DateTime
cache_ok = True
if value is not None:
if not value.tzinfo:
raise TypeError("tzinfo is required")
- value = value.astimezone(datetime.timezone.utc).replace(
- tzinfo=None
- )
+ value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None)
return value
def process_result_value(self, value, dialect):
value = value.replace(tzinfo=datetime.timezone.utc)
return value
-
.. _custom_guid_type:
Backend-agnostic GUID Type
from sqlalchemy.dialects.postgresql import UUID
import uuid
+
class GUID(TypeDecorator):
"""Platform-independent GUID type.
CHAR(32), storing as stringified hex values.
"""
+
impl = CHAR
cache_ok = True
def load_dialect_impl(self, dialect):
- if dialect.name == 'postgresql':
+ if dialect.name == "postgresql":
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
- elif dialect.name == 'postgresql':
+ elif dialect.name == "postgresql":
return str(value)
else:
if not isinstance(value, uuid.UUID):
    return "%.32x" % uuid.UUID(value).int
else:
    # hexstring
    return "%.32x" % value.int
json_type = MutableDict.as_mutable(JSONEncodedDict)
+
class MyClass(Base):
# ...
json_data = Column(json_type)
-
.. seealso::
:ref:`mutable_toplevel`
from sqlalchemy import type_coerce, String
- stmt = select(my_table).where(
- type_coerce(my_table.c.json_data, String).like('%foo%'))
+ stmt = select(my_table).where(type_coerce(my_table.c.json_data, String).like("%foo%"))
:class:`.TypeDecorator` provides a built-in system for working up type
translations like these based on operators. If we wanted to frequently use the
from sqlalchemy.sql import operators
from sqlalchemy import String
+
class JSONEncodedDict(TypeDecorator):
impl = VARCHAR
from sqlalchemy import func
from sqlalchemy.types import UserDefinedType
+
class Geometry(UserDefinedType):
def get_col_spec(self):
return "GEOMETRY"
We can apply the ``Geometry`` type into :class:`_schema.Table` metadata
and use it in a :func:`_expression.select` construct::
- geometry = Table('geometry', metadata,
- Column('geom_id', Integer, primary_key=True),
- Column('geom_data', Geometry)
- )
+ geometry = Table(
+ "geometry",
+ metadata,
+ Column("geom_id", Integer, primary_key=True),
+ Column("geom_data", Geometry),
+ )
- print(select(geometry).where(
- geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)'))
+ print(
+ select(geometry).where(
+ geometry.c.geom_data == "LINESTRING(189412 252431,189631 259122)"
+ )
+ )
The resulting SQL embeds both functions as appropriate. ``ST_AsText``
is applied to the columns clause so that the return value is run through
a :func:`_expression.select` against a :func:`.label` of our expression, the string
label is moved to the outside of the wrapped expression::
- print(select(geometry.c.geom_data.label('my_data')))
+ print(select(geometry.c.geom_data.label("my_data")))
Output::
PostgreSQL ``pgcrypto`` extension to encrypt/decrypt values
transparently::
- from sqlalchemy import create_engine, String, select, func, \
- MetaData, Table, Column, type_coerce, TypeDecorator
+ from sqlalchemy import (
+ create_engine,
+ String,
+ select,
+ func,
+ MetaData,
+ Table,
+ Column,
+ type_coerce,
+ TypeDecorator,
+ )
from sqlalchemy.dialects.postgresql import BYTEA
+
class PGPString(TypeDecorator):
impl = BYTEA
def column_expression(self, col):
return func.pgp_sym_decrypt(col, self.passphrase)
+
metadata_obj = MetaData()
- message = Table('message', metadata_obj,
- Column('username', String(50)),
- Column('message',
- PGPString("this is my passphrase")),
- )
+ message = Table(
+ "message",
+ metadata_obj,
+ Column("username", String(50)),
+ Column("message", PGPString("this is my passphrase")),
+ )
engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
with engine.begin() as conn:
metadata_obj.create_all(conn)
- conn.execute(message.insert(), username="some user",
- message="this is my message")
+ conn.execute(message.insert(), username="some user", message="this is my message")
- print(conn.scalar(
- select(message.c.message).\
- where(message.c.username == "some user")
- ))
+ print(
+ conn.scalar(select(message.c.message).where(message.c.username == "some user"))
+ )
The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied
to the INSERT and SELECT statements::
is a Python callable that accepts any arbitrary right-hand side expression::
>>> from sqlalchemy import column
- >>> expr = column('x').op('>>')(column('y'))
+ >>> expr = column("x").op(">>")(column("y"))
>>> print(expr)
x >> y
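When the custom operator is a comparison returning a boolean, the
``bool_op()`` variant produces an expression suitable for WHERE criteria; a
small sketch::

    >>> expr = column("x").bool_op("@>")(column("y"))
    >>> print(expr)
    x @> y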
from sqlalchemy import Integer
+
class MyInt(Integer):
class comparator_factory(Integer.Comparator):
def __add__(self, other):
from sqlalchemy import Integer
+
class MyInt(Integer):
class comparator_factory(Integer.Comparator):
def __add__(self, other):
from sqlalchemy import Integer, func
+
class MyInt(Integer):
class comparator_factory(Integer.Comparator):
def log(self, other):
from sqlalchemy.sql.expression import UnaryExpression
from sqlalchemy.sql import operators
+
class MyInteger(Integer):
class comparator_factory(Integer.Comparator):
def factorial(self):
- return UnaryExpression(self.expr,
- modifier=operators.custom_op("!"),
- type_=MyInteger)
+ return UnaryExpression(
+ self.expr, modifier=operators.custom_op("!"), type_=MyInteger
+ )
Using the above type::
>>> from sqlalchemy.sql import column
- >>> print(column('x', MyInteger).factorial())
+ >>> print(column("x", MyInteger).factorial())
x !
.. seealso::
>>> from sqlalchemy import Table, Column, MetaData, create_engine, PickleType, Integer
>>> metadata = MetaData()
- >>> my_table = Table("my_table", metadata, Column('id', Integer), Column("data", PickleType))
- >>> engine = create_engine("sqlite://", echo='debug')
+ >>> my_table = Table(
+ ... "my_table", metadata, Column("id", Integer), Column("data", PickleType)
+ ... )
+ >>> engine = create_engine("sqlite://", echo="debug")
>>> my_table.create(engine)
INFO sqlalchemy.engine.base.Engine
CREATE TABLE my_table (
columns for which we want to use a custom or decorated datatype::
>>> metadata_three = MetaData()
- >>> my_reflected_table = Table("my_table", metadata_three, Column("data", PickleType), autoload_with=engine)
+ >>> my_reflected_table = Table(
+ ... "my_table", metadata_three, Column("data", PickleType), autoload_with=engine
+ ... )
The ``my_reflected_table`` object above is reflected, and will load the
definition of the "id" column from the SQLite database. But for the "data"
from sqlalchemy import PickleType
from sqlalchemy import Table
+
@event.listens_for(Table, "column_reflect")
def _setup_pickletype(inspector, table, column_info):
if isinstance(column_info["type"], BLOB):
in order to affect only those columns where the datatype is important, such as
a lookup table of table names and possibly column names, or other heuristics
in order to accurately determine which columns should be established with an
-in Python datatype.
\ No newline at end of file
+in Python datatype.
event.listen(
metadata,
"after_create",
- DDL("ALTER TABLE users ADD CONSTRAINT "
+ DDL(
+ "ALTER TABLE users ADD CONSTRAINT "
"cst_user_name_length "
- " CHECK (length(user_name) >= 8)")
+ " CHECK (length(user_name) >= 8)"
+ ),
)
A more comprehensive method of creating libraries of DDL constructs is to use
the PostgreSQL backend, we could invoke this as::
mytable = Table(
- 'mytable', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', String(50))
+ "mytable",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("data", String(50)),
)
func = DDL(
"FOR EACH ROW EXECUTE PROCEDURE my_func();"
)
- event.listen(
- mytable,
- 'after_create',
- func.execute_if(dialect='postgresql')
- )
+ event.listen(mytable, "after_create", func.execute_if(dialect="postgresql"))
- event.listen(
- mytable,
- 'after_create',
- trigger.execute_if(dialect='postgresql')
- )
+ event.listen(mytable, "after_create", trigger.execute_if(dialect="postgresql"))
The :paramref:`.DDLElement.execute_if.dialect` keyword also accepts a tuple
of string dialect names::
event.listen(
- mytable,
- "after_create",
- trigger.execute_if(dialect=('postgresql', 'mysql'))
+ mytable, "after_create", trigger.execute_if(dialect=("postgresql", "mysql"))
)
event.listen(
- mytable,
- "before_drop",
- trigger.execute_if(dialect=('postgresql', 'mysql'))
+ mytable, "before_drop", trigger.execute_if(dialect=("postgresql", "mysql"))
)
The :meth:`.DDLElement.execute_if` method can also work against a callable
def should_create(ddl, target, connection, **kw):
row = connection.execute(
- "select conname from pg_constraint where conname='%s'" %
- ddl.element.name).scalar()
+ "select conname from pg_constraint where conname='%s'" % ddl.element.name
+ ).scalar()
return not bool(row)
+
def should_drop(ddl, target, connection, **kw):
return not should_create(ddl, target, connection, **kw)
+
event.listen(
users,
"after_create",
DDL(
"ALTER TABLE users ADD CONSTRAINT "
"cst_user_name_length CHECK (length(user_name) >= 8)"
- ).execute_if(callable_=should_create)
+ ).execute_if(callable_=should_create),
)
event.listen(
users,
"before_drop",
- DDL(
- "ALTER TABLE users DROP CONSTRAINT cst_user_name_length"
- ).execute_if(callable_=should_drop)
+ DDL("ALTER TABLE users DROP CONSTRAINT cst_user_name_length").execute_if(
+ callable_=should_drop
+ ),
)
{sql}users.create(engine)
def should_create(ddl, target, connection, **kw):
row = connection.execute(
- "select conname from pg_constraint where conname='%s'" %
- ddl.element.name).scalar()
+ "select conname from pg_constraint where conname='%s'" % ddl.element.name
+ ).scalar()
return not bool(row)
+
def should_drop(ddl, target, connection, **kw):
return not should_create(ddl, target, connection, **kw)
+
event.listen(
- users,
- "after_create",
- AddConstraint(constraint).execute_if(callable_=should_create)
+ users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create)
)
event.listen(
- users,
- "before_drop",
- DropConstraint(constraint).execute_if(callable_=should_drop)
+ users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop)
)
{sql}users.create(engine)
The simplest kind of default is a scalar value used as the default value of a column::
- Table("mytable", metadata_obj,
- Column("somecolumn", Integer, default=12)
- )
+ Table("mytable", metadata_obj, Column("somecolumn", Integer, default=12))
Above, the value "12" will be bound as the column value during an INSERT if no
other value is supplied.
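For illustration, assuming the table above were assigned to ``mytable``, an
INSERT that omits the column picks up the Python-side default::

    with engine.begin() as conn:
        conn.execute(mytable.insert())  # "somecolumn" is inserted as 12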
not very common (as UPDATE statements are usually looking for dynamic
defaults)::
- Table("mytable", metadata_obj,
- Column("somecolumn", Integer, onupdate=25)
- )
-
+ Table("mytable", metadata_obj, Column("somecolumn", Integer, onupdate=25))
Python-Executed Functions
-------------------------
# a function which counts upwards
i = 0
+
+
def mydefault():
global i
i += 1
return i
- t = Table("mytable", metadata_obj,
- Column('id', Integer, primary_key=True, default=mydefault),
+
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer, primary_key=True, default=mydefault),
)
It should be noted that for real "incrementing sequence" behavior, the
import datetime
- t = Table("mytable", metadata_obj,
- Column('id', Integer, primary_key=True),
-
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
# define 'last_updated' to be populated with datetime.now()
- Column('last_updated', DateTime, onupdate=datetime.datetime.now),
+ Column("last_updated", DateTime, onupdate=datetime.datetime.now),
)
When an update statement executes and no value is passed for ``last_updated``,
single ``context`` argument::
def mydefault(context):
- return context.get_current_parameters()['counter'] + 12
+ return context.get_current_parameters()["counter"] + 12
- t = Table('mytable', metadata_obj,
- Column('counter', Integer),
- Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault)
+
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("counter", Integer),
+ Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault),
)
The above default generation function is applied so that it will execute for
also be passed SQL expressions, which are in most cases rendered inline within the
INSERT or UPDATE statement::
- t = Table("mytable", metadata_obj,
- Column('id', Integer, primary_key=True),
-
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
# define 'create_date' to default to now()
- Column('create_date', DateTime, default=func.now()),
-
+ Column("create_date", DateTime, default=func.now()),
# define 'key' to pull its default from the 'keyvalues' table
- Column('key', String(20), default=select(keyvalues.c.key).where(keyvalues.c.type='type1')),
-
+ Column(
+ "key",
+ String(20),
+ default=select(keyvalues.c.key).where(keyvalues.c.type == "type1"),
+ ),
# define 'last_modified' to use the current_timestamp SQL function on update
- Column('last_modified', DateTime, onupdate=func.utc_timestamp())
- )
+ Column("last_modified", DateTime, onupdate=func.utc_timestamp()),
+ )
Above, the ``create_date`` column will be populated with the result of the
``now()`` SQL function (which, depending on backend, compiles into ``NOW()``
.. sourcecode:: python+sql
- t = Table('test', metadata_obj,
- Column('abc', String(20), server_default='abc'),
- Column('created_at', DateTime, server_default=func.sysdate()),
- Column('index_value', Integer, server_default=text("0"))
+ t = Table(
+ "test",
+ metadata_obj,
+ Column("abc", String(20), server_default="abc"),
+ Column("created_at", DateTime, server_default=func.sysdate()),
+ Column("index_value", Integer, server_default=text("0")),
)
A create call for the above table will produce::
from sqlalchemy.schema import FetchedValue
- t = Table('test', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('abc', TIMESTAMP, server_default=FetchedValue()),
- Column('def', String(20), server_onupdate=FetchedValue())
+ t = Table(
+ "test",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("abc", TIMESTAMP, server_default=FetchedValue()),
+ Column("def", String(20), server_onupdate=FetchedValue()),
)
The :class:`.FetchedValue` indicator does not affect the rendered DDL for the
configured to fire off during UPDATE operations if desired. It is most
commonly used in conjunction with a single integer primary key column::
- table = Table("cartitems", metadata_obj,
+ table = Table(
+ "cartitems",
+ metadata_obj,
Column(
"cart_id",
Integer,
- Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True),
+ Sequence("cart_id_seq", metadata=metadata_obj),
+ primary_key=True,
+ ),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
Where above, the table "cartitems" is associated with a sequence named
passing it directly to a SQL execution method::
with my_engine.connect() as conn:
- seq = Sequence('some_sequence')
+ seq = Sequence("some_sequence")
nextid = conn.execute(seq)
In order to embed the "next value" function of a :class:`.Sequence`
method, which will render at statement compilation time a SQL function that is
appropriate for the target backend::
- >>> my_seq = Sequence('some_sequence')
+ >>> my_seq = Sequence("some_sequence")
>>> stmt = select(my_seq.next_value())
>>> print(stmt.compile(dialect=postgresql.dialect()))
SELECT nextval('some_sequence') AS next_value_1
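The construct embeds in larger statements as well; as a sketch, assuming a
``cartitems`` table like those shown below::

    stmt = cartitems.insert().values(cart_id=my_seq.next_value())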
For many years, the SQLAlchemy documentation referred to the
example of associating a :class:`.Sequence` with a table as follows::
- table = Table("cartitems", metadata_obj,
- Column("cart_id", Integer, Sequence('cart_id_seq'),
- primary_key=True),
+ table = Table(
+ "cartitems",
+ metadata_obj,
+ Column("cart_id", Integer, Sequence("cart_id_seq"), primary_key=True),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
While the above is a prominent idiomatic pattern, it is recommended that
the :class:`.Sequence` in most cases be explicitly associated with the
:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter::
- table = Table("cartitems", metadata_obj,
+ table = Table(
+ "cartitems",
+ metadata_obj,
Column(
"cart_id",
Integer,
- Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True),
+ Sequence("cart_id_seq", metadata=metadata_obj),
+ primary_key=True,
+ ),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
The :class:`.Sequence` object is a first class
:class:`_schema.Column` as the **Python side default generator**::
Column(
- "cart_id", Integer, Sequence('cart_id_seq', metadata=metadata_obj),
- primary_key=True)
+ "cart_id", Integer, Sequence("cart_id_seq", metadata=metadata_obj), primary_key=True
+ )
In the above case, the :class:`.Sequence` will automatically be subject
to CREATE SEQUENCE / DROP SEQUENCE DDL when the related :class:`_schema.Table`
:class:`_schema.Column` both as the Python-side default generator as well as
the server-side default generator::
- cart_id_seq = Sequence('cart_id_seq', metadata=metadata_obj)
- table = Table("cartitems", metadata_obj,
+ cart_id_seq = Sequence("cart_id_seq", metadata=metadata_obj)
+ table = Table(
+ "cartitems",
+ metadata_obj,
Column(
- "cart_id", Integer, cart_id_seq,
- server_default=cart_id_seq.next_value(), primary_key=True),
+ "cart_id",
+ Integer,
+ cart_id_seq,
+ server_default=cart_id_seq.next_value(),
+ primary_key=True,
+ ),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
or with the ORM::
class CartItem(Base):
- __tablename__ = 'cartitems'
+ __tablename__ = "cartitems"
- cart_id_seq = Sequence('cart_id_seq', metadata=Base.metadata)
+ cart_id_seq = Sequence("cart_id_seq", metadata=Base.metadata)
cart_id = Column(
- Integer, cart_id_seq,
- server_default=cart_id_seq.next_value(), primary_key=True)
+ Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True
+ )
description = Column(String(40))
createdate = Column(DateTime)
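Because the sequence above is tied to ``Base.metadata``, emitting DDL for the
model also emits the sequence; a brief sketch::

    # emits CREATE SEQUENCE cart_id_seq followed by CREATE TABLE cartitems
    Base.metadata.create_all(engine)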
data = Table(
"data",
metadata_obj,
- Column('id', Integer, Identity(start=42, cycle=True), primary_key=True),
- Column('data', String)
+ Column("id", Integer, Identity(start=42, cycle=True), primary_key=True),
+ Column("data", String),
)
The DDL for the ``data`` table when run on a PostgreSQL 12 backend will look
:func:`_sa.create_engine()`::
from sqlalchemy import create_engine
- engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')
+
+ engine = create_engine("postgresql://scott:tiger@localhost:5432/mydatabase")
The above engine creates a :class:`.Dialect` object tailored towards
PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI
PostgreSQL DBAPIs include pg8000 and asyncpg::
# default
- engine = create_engine('postgresql://scott:tiger@localhost/mydatabase')
+ engine = create_engine("postgresql://scott:tiger@localhost/mydatabase")
# psycopg2
- engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase')
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/mydatabase")
# pg8000
- engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase')
+ engine = create_engine("postgresql+pg8000://scott:tiger@localhost/mydatabase")
More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`.
MySQL DBAPIs available, including PyMySQL::
# default
- engine = create_engine('mysql://scott:tiger@localhost/foo')
+ engine = create_engine("mysql://scott:tiger@localhost/foo")
# mysqlclient (a maintained fork of MySQL-Python)
- engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
+ engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo")
# PyMySQL
- engine = create_engine('mysql+pymysql://scott:tiger@localhost/foo')
+ engine = create_engine("mysql+pymysql://scott:tiger@localhost/foo")
More notes on connecting to MySQL at :ref:`mysql_toplevel`.
The Oracle dialect uses cx_oracle as the default DBAPI::
- engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
+ engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname")
- engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname')
+ engine = create_engine("oracle+cx_oracle://scott:tiger@tnsname")
More notes on connecting to Oracle at :ref:`oracle_toplevel`.
also available::
# pyodbc
- engine = create_engine('mssql+pyodbc://scott:tiger@mydsn')
+ engine = create_engine("mssql+pyodbc://scott:tiger@mydsn")
# pymssql
- engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname')
+ engine = create_engine("mssql+pymssql://scott:tiger@hostname:port/dbname")
More notes on connecting to SQL Server at :ref:`mssql_toplevel`.
# sqlite://<nohostname>/<path>
# where <path> is relative:
- engine = create_engine('sqlite:///foo.db')
+ engine = create_engine("sqlite:///foo.db")
And for an absolute file path, the three slashes are followed by the absolute path::
# Unix/Mac - 4 initial slashes in total
- engine = create_engine('sqlite:////absolute/path/to/foo.db')
+ engine = create_engine("sqlite:////absolute/path/to/foo.db")
# Windows
- engine = create_engine('sqlite:///C:\\path\\to\\foo.db')
+ engine = create_engine("sqlite:///C:\\path\\to\\foo.db")
# Windows alternative using raw string
- engine = create_engine(r'sqlite:///C:\path\to\foo.db')
+ engine = create_engine(r"sqlite:///C:\path\to\foo.db")
To use a SQLite ``:memory:`` database, specify an empty URL::
- engine = create_engine('sqlite://')
+ engine = create_engine("sqlite://")
More notes on connecting to SQLite at :ref:`sqlite_toplevel`.
for keys and either strings or tuples of strings for values, e.g.::
>>> from sqlalchemy.engine import make_url
- >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt")
+ >>> url = make_url(
+ ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt"
+ ... )
>>> url.query
immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'})
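Individual entries read back like any mapping; a short continuation of the
doctest::

    >>> url.query["ssl_cipher"]
    '/path/to/crt'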
this is DBAPIs that accept an argument ``encoding`` for character encodings,
such as most MySQL DBAPIs::
- engine = create_engine(
- "mysql+pymysql://user:pass@host/test?charset=utf8mb4"
- )
+ engine = create_engine("mysql+pymysql://user:pass@host/test?charset=utf8mb4")
The advantage of using the query string is that additional DBAPI options may be
specified in configuration files in a manner that's portable to the DBAPI
method directly as follows::
>>> from sqlalchemy import create_engine
- >>> engine = create_engine("mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4")
+ >>> engine = create_engine(
+ ... "mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4"
+ ... )
>>> args, kwargs = engine.dialect.create_connect_args(engine.url)
>>> args, kwargs
([], {'host': 'some_host', 'database': 'test', 'user': 'some_user', 'password': 'some_pass', 'charset': 'utf8mb4', 'client_flag': 2})
engine = create_engine(
"postgresql://user:pass@hostname/dbname",
- connect_args={"connection_factory": MyConnectionFactory}
+ connect_args={"connection_factory": MyConnectionFactory},
)
Another example is the pyodbc "timeout" parameter::
engine = create_engine(
- "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server",
- connect_args={"timeout": 30}
+ "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server",
+ connect_args={"timeout": 30},
)
The above example also illustrates that both URL "query string" parameters as
engine = create_engine("postgresql://user:pass@hostname/dbname")
+
@event.listens_for(engine, "do_connect")
def receive_do_connect(dialect, conn_rec, cargs, cparams):
- cparams['connection_factory'] = MyConnectionFactory
+ cparams["connection_factory"] = MyConnectionFactory
.. _engines_dynamic_tokens:
engine = create_engine("postgresql://user@hostname/dbname")
+
@event.listens_for(engine, "do_connect")
def provide_token(dialect, conn_rec, cargs, cparams):
- cparams['token'] = get_authentication_token()
+ cparams["token"] = get_authentication_token()
.. seealso::
from sqlalchemy import event
- engine = create_engine(
- "postgresql://user:pass@hostname/dbname"
- )
+ engine = create_engine("postgresql://user:pass@hostname/dbname")
+
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
cursor_obj = dbapi_connection.cursor()
cursor_obj.execute("SET some session variables")
cursor_obj.close()
-
Fully Replacing the DBAPI ``connect()`` function
------------------------------------------------
from sqlalchemy import event
- engine = create_engine(
- "postgresql://user:pass@hostname/dbname"
- )
+ engine = create_engine("postgresql://user:pass@hostname/dbname")
+
@event.listens_for(engine, "do_connect")
def receive_do_connect(dialect, conn_rec, cargs, cparams):
    # return the DBAPI connection directly, bypassing the
    # dialect's own connect routine
    return psycopg2.connect(*cargs, **cparams)
import logging
logging.basicConfig()
- logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
+ logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
By default, the log level is set to ``logging.WARN`` within the entire
``sqlalchemy`` namespace so that no log operations occur, even within an
>>> from sqlalchemy import create_engine, text
- >>> e = create_engine("sqlite://", echo=True, echo_pool='debug')
+ >>> e = create_engine("sqlite://", echo=True, echo_pool="debug")
>>> with e.connect() as conn:
- ... print(conn.scalar(text("select 'hi'")))
- ...
+ ... print(conn.scalar(text("select 'hi'")))
2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Created new connection <sqlite3.Connection object at 0x7f287819ac60>
2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Connection <sqlite3.Connection object at 0x7f287819ac60> checked out from pool
2020-10-24 12:54:57,702 INFO sqlalchemy.engine.Engine select 'hi'
Use of these flags is roughly equivalent to::
import logging
+
logging.basicConfig()
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
>>> from sqlalchemy import create_engine
>>> from sqlalchemy import text
- >>> e = create_engine("sqlite://", echo=True, logging_name='myengine')
+ >>> e = create_engine("sqlite://", echo=True, logging_name="myengine")
>>> with e.connect() as conn:
... conn.execute(text("select 'hi'"))
- ...
2020-10-24 12:47:04,291 INFO sqlalchemy.engine.Engine.myengine select 'hi'
2020-10-24 12:47:04,292 INFO sqlalchemy.engine.Engine.myengine ()
>>> e = create_engine("sqlite://", echo=True, hide_parameters=True)
>>> with e.connect() as conn:
... conn.execute(text("select :some_private_name"), {"some_private_name": "pii"})
- ...
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine select ?
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine [SQL parameters hidden due to hide_parameters=True]
from sqlalchemy.event import listen
from sqlalchemy.pool import Pool
+
def my_on_connect(dbapi_con, connection_record):
print("New DBAPI connection:", dbapi_con)
- listen(Pool, 'connect', my_on_connect)
+
+ listen(Pool, "connect", my_on_connect)
Listening with the :func:`.listens_for` decorator looks like::
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
+
@listens_for(Pool, "connect")
def my_on_connect(dbapi_con, connection_record):
print("New DBAPI connection:", dbapi_con)
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
+
@listens_for(Pool, "connect", named=True)
def my_on_connect(**kw):
- print("New DBAPI connection:", kw['dbapi_connection'])
+ print("New DBAPI connection:", kw["dbapi_connection"])
When using named argument passing, the names listed in the function argument
specification will be used as keys in the dictionary.
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
+
@listens_for(Pool, "connect", named=True)
def my_on_connect(dbapi_connection, **kw):
print("New DBAPI connection:", dbapi_connection)
- print("Connection record:", kw['connection_record'])
+ print("Connection record:", kw["connection_record"])
Above, the presence of ``**kw`` tells :func:`.listens_for` that
arguments should be passed to the function by name, rather than positionally.
from sqlalchemy.engine import Engine
import psycopg2
+
def connect():
- return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test')
+ return psycopg2.connect(user="ed", host="127.0.0.1", dbname="test")
+
my_pool = QueuePool(connect)
- my_engine = create_engine('postgresql://ed@localhost/test')
+ my_engine = create_engine("postgresql://ed@localhost/test")
# associate listener with all instances of Pool
- listen(Pool, 'connect', my_on_connect)
+ listen(Pool, "connect", my_on_connect)
# associate listener with all instances of Pool
# via the Engine class
- listen(Engine, 'connect', my_on_connect)
+ listen(Engine, "connect", my_on_connect)
# associate listener with my_pool
- listen(my_pool, 'connect', my_on_connect)
+ listen(my_pool, "connect", my_on_connect)
# associate listener with my_engine.pool
- listen(my_engine, 'connect', my_on_connect)
-
+ listen(my_engine, "connect", my_on_connect)
.. _event_modifiers:
def validate_phone(target, value, oldvalue, initiator):
"""Strip non-numeric characters from a phone number"""
- return re.sub(r'\D', '', value)
+ return re.sub(r"\D", "", value)
+
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
- listen(UserContact.phone, 'set', validate_phone, retval=True)
+ listen(UserContact.phone, "set", validate_phone, retval=True)
Event Reference
---------------
automatically. They are invoked in the same way as any other member of the
:data:`_sql.func` namespace::
- select(func.count('*')).select_from(some_table)
+ select(func.count("*")).select_from(some_table)
Note that any name not known to :data:`_sql.func` generates the function name
as is - there is no restriction on what SQL functions can be called, known or
:func:`_sa.create_engine`::
from sqlalchemy import create_engine
+
engine = create_engine("postgresql://user:pass@host/dbname", future=True)
Similarly, with the ORM, to enable "future" behavior in the ORM :class:`.Session`,
from sqlalchemy import Table, Column, Integer, String
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('user_id', Integer, primary_key=True),
- Column('user_name', String(16), nullable=False),
- Column('email_address', String(60)),
- Column('nickname', String(50), nullable=False)
+ Column("user_id", Integer, primary_key=True),
+ Column("user_name", String(16), nullable=False),
+ Column("email_address", String(60)),
+ Column("nickname", String(50), nullable=False),
)
Above, a table called ``user`` is described, which contains four columns. The
references)::
>>> for t in metadata_obj.sorted_tables:
- ... print(t.name)
+ ... print(t.name)
user
user_preference
invoice
accessors which allow inspection of its properties. Given the following
:class:`~sqlalchemy.schema.Table` definition::
- employees = Table('employees', metadata_obj,
- Column('employee_id', Integer, primary_key=True),
- Column('employee_name', String(60), nullable=False),
- Column('employee_dept', Integer, ForeignKey("departments.department_id"))
+ employees = Table(
+ "employees",
+ metadata_obj,
+ Column("employee_id", Integer, primary_key=True),
+ Column("employee_name", String(60), nullable=False),
+ Column("employee_dept", Integer, ForeignKey("departments.department_id")),
)
Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table -
employees.c.employee_id
# via string
- employees.c['employee_id']
+ employees.c["employee_id"]
# iterate through all columns
for c in employees.c:
    print(c)
.. sourcecode:: python+sql
- engine = create_engine('sqlite:///:memory:')
+ engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
- user = Table('user', metadata_obj,
- Column('user_id', Integer, primary_key=True),
- Column('user_name', String(16), nullable=False),
- Column('email_address', String(60), key='email'),
- Column('nickname', String(50), nullable=False)
+ user = Table(
+ "user",
+ metadata_obj,
+ Column("user_id", Integer, primary_key=True),
+ Column("user_name", String(16), nullable=False),
+ Column("email_address", String(60), key="email"),
+ Column("nickname", String(50), nullable=False),
)
- user_prefs = Table('user_prefs', metadata_obj,
- Column('pref_id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
- Column('pref_name', String(40), nullable=False),
- Column('pref_value', String(100))
+ user_prefs = Table(
+ "user_prefs",
+ metadata_obj,
+ Column("pref_id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False),
+ Column("pref_name", String(40), nullable=False),
+ Column("pref_value", String(100)),
)
{sql}metadata_obj.create_all(engine)
.. sourcecode:: python+sql
- engine = create_engine('sqlite:///:memory:')
+ engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
- employees = Table('employees', metadata_obj,
- Column('employee_id', Integer, primary_key=True),
- Column('employee_name', String(60), nullable=False, key='name'),
- Column('employee_dept', Integer, ForeignKey("departments.department_id"))
+ employees = Table(
+ "employees",
+ metadata_obj,
+ Column("employee_id", Integer, primary_key=True),
+ Column("employee_name", String(60), nullable=False, key="name"),
+ Column("employee_dept", Integer, ForeignKey("departments.department_id")),
)
{sql}employees.create(engine)
CREATE TABLE employees(
metadata_obj = MetaData()
financial_info = Table(
- 'financial_info',
+ "financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
- schema='remote_banks'
+ Column("id", Integer, primary_key=True),
+ Column("value", String(100), nullable=False),
+ schema="remote_banks",
)
SQL that is rendered using this :class:`_schema.Table`, such as the SELECT
in the :attr:`_schema.MetaData.tables` collection by searching for the
key ``'remote_banks.financial_info'``::
- >>> metadata_obj.tables['remote_banks.financial_info']
+ >>> metadata_obj.tables["remote_banks.financial_info"]
Table('financial_info', MetaData(),
Column('id', Integer(), table=<financial_info>, primary_key=True, nullable=False),
Column('value', String(length=100), table=<financial_info>, nullable=False),
customer = Table(
"customer",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('financial_info_id', ForeignKey("remote_banks.financial_info.id")),
- schema='remote_banks'
+ Column("id", Integer, primary_key=True),
+ Column("financial_info_id", ForeignKey("remote_banks.financial_info.id")),
+ schema="remote_banks",
)
The :paramref:`_schema.Table.schema` argument may also be used with certain
dotted "database/owner" tokens. The tokens may be placed directly in the name
at once, such as::
- schema="dbo.scott"
+ schema = "dbo.scott"
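
For example, a sketch of passing such a token to :class:`_schema.Table` (the
table and column names here are hypothetical; ``metadata_obj`` is assumed)::

    account_table = Table(
        "account_table",
        metadata_obj,
        Column("id", Integer, primary_key=True),
        schema="dbo.scott",
    )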
.. seealso::
metadata_obj = MetaData(schema="remote_banks")
financial_info = Table(
- 'financial_info',
+ "financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
+ Column("id", Integer, primary_key=True),
+ Column("value", String(100), nullable=False),
)
Above, for any :class:`_schema.Table` object (or :class:`_schema.Sequence` object
includes that the :class:`_schema.Table` is cataloged in the :class:`_schema.MetaData`
using the schema-qualified name, that is::
- metadata_obj.tables['remote_banks.financial_info']
+ metadata_obj.tables["remote_banks.financial_info"]
When using the :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint`
objects to refer to this table, either the schema-qualified name or the
# either will work:
refers_to_financial_info = Table(
- 'refers_to_financial_info',
+ "refers_to_financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('fiid', ForeignKey('financial_info.id')),
+ Column("id", Integer, primary_key=True),
+ Column("fiid", ForeignKey("financial_info.id")),
)
# or
refers_to_financial_info = Table(
- 'refers_to_financial_info',
+ "refers_to_financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('fiid', ForeignKey('remote_banks.financial_info.id')),
+ Column("id", Integer, primary_key=True),
+ Column("fiid", ForeignKey("remote_banks.financial_info.id")),
)
When using a :class:`_schema.MetaData` object that sets
metadata_obj = MetaData(schema="remote_banks")
financial_info = Table(
- 'financial_info',
+ "financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
- schema=BLANK_SCHEMA # will not use "remote_banks"
+ Column("id", Integer, primary_key=True),
+ Column("value", String(100), nullable=False),
+ schema=BLANK_SCHEMA, # will not use "remote_banks"
)
.. seealso::
engine = create_engine("oracle+cx_oracle://scott:tiger@tsn_name")
+
@event.listens_for(engine, "connect", insert=True)
def set_current_schema(dbapi_connection, connection_record):
cursor_obj = dbapi_connection.cursor()
"InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using
``mysql_engine``::
- addresses = Table('engine_email_addresses', metadata_obj,
- Column('address_id', Integer, primary_key=True),
- Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
- Column('email_address', String(20)),
- mysql_engine='InnoDB'
+ addresses = Table(
+ "engine_email_addresses",
+ metadata_obj,
+ Column("address_id", Integer, primary_key=True),
+ Column("remote_user_id", Integer, ForeignKey(users.c.user_id)),
+ Column("email_address", String(20)),
+ mysql_engine="InnoDB",
)
Other backends may support table-level options as well - these would be
>>> user_table = Table(
... "user_account",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30)),
- ... Column('fullname', String)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30)),
+ ... Column("fullname", String),
... )
>>> from sqlalchemy import ForeignKey
>>> address_table = Table(
... "address",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', None, ForeignKey('user_account.id')),
- ... Column('email_address', String, nullable=False)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", None, ForeignKey("user_account.id")),
+ ... Column("email_address", String, nullable=False),
... )
>>> metadata_obj.create_all(engine)
BEGIN (implicit)
>>> Base = declarative_base()
>>> from sqlalchemy.orm import relationship
>>> class User(Base):
- ... __tablename__ = 'user_account'
+ ... __tablename__ = "user_account"
...
... id = Column(Integer, primary_key=True)
... name = Column(String(30))
... addresses = relationship("Address", back_populates="user")
...
... def __repr__(self):
- ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
+ ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
>>> class Address(Base):
- ... __tablename__ = 'address'
+ ... __tablename__ = "address"
...
... id = Column(Integer, primary_key=True)
... email_address = Column(String, nullable=False)
- ... user_id = Column(Integer, ForeignKey('user_account.id'))
+ ... user_id = Column(Integer, ForeignKey("user_account.id"))
...
... user = relationship("User", back_populates="addresses")
...
>>> conn = engine.connect()
>>> from sqlalchemy.orm import Session
>>> session = Session(conn)
- >>> session.add_all([
- ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[
- ... Address(email_address="spongebob@sqlalchemy.org")
- ... ]),
- ... User(name="sandy", fullname="Sandy Cheeks", addresses=[
- ... Address(email_address="sandy@sqlalchemy.org"),
- ... Address(email_address="squirrel@squirrelpower.org")
- ... ]),
- ... User(name="patrick", fullname="Patrick Star", addresses=[
- ... Address(email_address="pat999@aol.com")
- ... ]),
- ... User(name="squidward", fullname="Squidward Tentacles", addresses=[
- ... Address(email_address="stentcl@sqlalchemy.org")
- ... ]),
- ... User(name="ehkrabs", fullname="Eugene H. Krabs"),
- ... ])
+ >>> session.add_all(
+ ... [
+ ... User(
+ ... name="spongebob",
+ ... fullname="Spongebob Squarepants",
+ ... addresses=[Address(email_address="spongebob@sqlalchemy.org")],
+ ... ),
+ ... User(
+ ... name="sandy",
+ ... fullname="Sandy Cheeks",
+ ... addresses=[
+ ... Address(email_address="sandy@sqlalchemy.org"),
+ ... Address(email_address="squirrel@squirrelpower.org"),
+ ... ],
+ ... ),
+ ... User(
+ ... name="patrick",
+ ... fullname="Patrick Star",
+ ... addresses=[Address(email_address="pat999@aol.com")],
+ ... ),
+ ... User(
+ ... name="squidward",
+ ... fullname="Squidward Tentacles",
+ ... addresses=[Address(email_address="stentcl@sqlalchemy.org")],
+ ... ),
+ ... User(name="ehkrabs", fullname="Eugene H. Krabs"),
+ ... ]
+ ... )
>>> session.commit()
BEGIN ...
>>> conn.begin()
* :meth:`_sql.ColumnOperators.__eq__` (Python "``==``" operator)::
- >>> print(column('x') == 5)
+ >>> print(column("x") == 5)
x = :x_1
..
* :meth:`_sql.ColumnOperators.__ne__` (Python "``!=``" operator)::
- >>> print(column('x') != 5)
+ >>> print(column("x") != 5)
x != :x_1
..
* :meth:`_sql.ColumnOperators.__gt__` (Python "``>``" operator)::
- >>> print(column('x') > 5)
+ >>> print(column("x") > 5)
x > :x_1
..
* :meth:`_sql.ColumnOperators.__lt__` (Python "``<``" operator)::
- >>> print(column('x') < 5)
+ >>> print(column("x") < 5)
x < :x_1
..
* :meth:`_sql.ColumnOperators.__ge__` (Python "``>=``" operator)::
- >>> print(column('x') >= 5)
+ >>> print(column("x") >= 5)
x >= :x_1
..
* :meth:`_sql.ColumnOperators.__le__` (Python "``<=``" operator)::
- >>> print(column('x') <= 5)
+ >>> print(column("x") <= 5)
x <= :x_1
..
* :meth:`_sql.ColumnOperators.between`::
- >>> print(column('x').between(5, 10))
+ >>> print(column("x").between(5, 10))
x BETWEEN :x_1 AND :x_2
..
values to the :meth:`_sql.ColumnOperators.in_` method::
- >>> print(column('x').in_([1, 2, 3]))
+ >>> print(column("x").in_([1, 2, 3]))
x IN (__[POSTCOMPILE_x_1])
The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters
"NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator::
- >>> print(column('x').not_in([1, 2, 3]))
+ >>> print(column("x").not_in([1, 2, 3]))
(x NOT IN (__[POSTCOMPILE_x_1]))
This is typically more easily available by negating with the ``~`` operator::
- >>> print(~column('x').in_([1, 2, 3]))
+ >>> print(~column("x").in_([1, 2, 3]))
(x NOT IN (__[POSTCOMPILE_x_1]))
Tuple IN Expressions
then receives a list of tuples::
>>> from sqlalchemy import tuple_
- >>> tup = tuple_(column('x', Integer), column('y', Integer))
+ >>> tup = tuple_(column("x", Integer), column("y", Integer))
>>> expr = tup.in_([(1, 2), (3, 4)])
>>> print(expr)
(x, y) IN (__[POSTCOMPILE_param_1])
construct is passed in directly, without any explicit conversion to a named
subquery::
- >>> print(column('x').in_(select(user_table.c.id)))
+ >>> print(column("x").in_(select(user_table.c.id)))
x IN (SELECT user_account.id
FROM user_account)
Tuples work as expected::
>>> print(
- ... tuple_(column('x'), column('y')).in_(
+ ... tuple_(column("x"), column("y")).in_(
... select(user_table.c.id, address_table.c.id).join(address_table)
... )
... )
as "<expr> IS NULL". The ``NULL`` constant is most easily acquired
using regular Python ``None``::
- >>> print(column('x').is_(None))
+ >>> print(column("x").is_(None))
x IS NULL
SQL NULL is also explicitly available, if needed, using the
:func:`_sql.null` construct::
>>> from sqlalchemy import null
- >>> print(column('x').is_(null()))
+ >>> print(column("x").is_(null()))
x IS NULL
The :meth:`_sql.ColumnOperators.is_` operator is automatically invoked when
explicitly, particularly when used with a dynamic value::
>>> a = None
- >>> print(column('x') == a)
+ >>> print(column("x") == a)
x IS NULL
Note that the Python ``is`` operator is **not overloaded**. Even though
Similar to :meth:`_sql.ColumnOperators.is_`, produces "IS NOT"::
- >>> print(column('x').is_not(None))
+ >>> print(column("x").is_not(None))
x IS NOT NULL
Is similarly equivalent to ``!= None``::
- >>> print(column('x') != None)
+ >>> print(column("x") != None)
x IS NOT NULL
* :meth:`_sql.ColumnOperators.is_distinct_from`:
Produces SQL IS DISTINCT FROM::
- >>> print(column('x').is_distinct_from('some value'))
+ >>> print(column("x").is_distinct_from("some value"))
x IS DISTINCT FROM :x_1
* :meth:`_sql.ColumnOperators.isnot_distinct_from`:
Produces SQL IS NOT DISTINCT FROM::
- >>> print(column('x').isnot_distinct_from('some value'))
+ >>> print(column("x").isnot_distinct_from("some value"))
x IS NOT DISTINCT FROM :x_1
String Comparisons
* :meth:`_sql.ColumnOperators.like`::
- >>> print(column('x').like('word'))
+ >>> print(column("x").like("word"))
x LIKE :x_1
..
Case insensitive LIKE makes use of the SQL ``lower()`` function on a
generic backend. On the PostgreSQL backend it will use ``ILIKE``::
- >>> print(column('x').ilike('word'))
+ >>> print(column("x").ilike("word"))
lower(x) LIKE lower(:x_1)
..
* :meth:`_sql.ColumnOperators.notlike`::
- >>> print(column('x').notlike('word'))
+ >>> print(column("x").notlike("word"))
x NOT LIKE :x_1
..
* :meth:`_sql.ColumnOperators.notilike`::
- >>> print(column('x').notilike('word'))
+ >>> print(column("x").notilike("word"))
lower(x) NOT LIKE lower(:x_1)
..
The string containment operators:

* :meth:`_sql.ColumnOperators.startswith`::
- >>> print(column('x').startswith('word'))
+ >>> print(column("x").startswith("word"))
x LIKE :x_1 || '%'
..
* :meth:`_sql.ColumnOperators.endswith`::
- >>> print(column('x').endswith('word'))
+ >>> print(column("x").endswith("word"))
x LIKE '%' || :x_1
..
* :meth:`_sql.ColumnOperators.contains`::
- >>> print(column('x').contains('word'))
+ >>> print(column("x").contains("word"))
x LIKE '%' || :x_1 || '%'
..
This is a dialect-specific operator that makes use of the MATCH
feature of the underlying database, if available::
- >>> print(column('x').match('word'))
+ >>> print(column("x").match("word"))
x MATCH :x_1
..
for example the PostgreSQL dialect::
>>> from sqlalchemy.dialects import postgresql
- >>> print(column('x').regexp_match('word').compile(dialect=postgresql.dialect()))
+ >>> print(column("x").regexp_match("word").compile(dialect=postgresql.dialect()))
x ~ %(x_1)s
Or MySQL::
>>> from sqlalchemy.dialects import mysql
- >>> print(column('x').regexp_match('word').compile(dialect=mysql.dialect()))
+ >>> print(column("x").regexp_match("word").compile(dialect=mysql.dialect()))
x REGEXP %s
..
String concatenation::
- >>> print(column('x').concat("some string"))
+ >>> print(column("x").concat("some string"))
x || :x_1
This operator is available via :meth:`_sql.ColumnOperators.__add__`, that
is, the Python ``+`` operator, when working with a column expression that
derives from :class:`_types.String`::
- >>> print(column('x', String) + "some string")
+ >>> print(column("x", String) + "some string")
x || :x_1
The operator will produce the appropriate database-specific construct;
for example, on MySQL it has historically been the ``concat()`` SQL function::
- >>> print((column('x', String) + "some string").compile(dialect=mysql.dialect()))
+ >>> print((column("x", String) + "some string").compile(dialect=mysql.dialect()))
concat(x, %s)
..
Complementary to :meth:`_sql.ColumnOperators.regexp_match`, this produces a
REGEXP REPLACE equivalent for the backends which support it::
- >>> print(column('x').regexp_replace('foo', 'bar').compile(dialect=postgresql.dialect()))
+ >>> print(column("x").regexp_replace("foo", "bar").compile(dialect=postgresql.dialect()))
REGEXP_REPLACE(x, %(x_1)s, %(x_2)s)
..
Produces the COLLATE SQL operator which provides for specific collations
at expression time::
- >>> print((column('x').collate('latin1_german2_ci') == 'Müller').compile(dialect=mysql.dialect()))
+ >>> print(
+ ... (column("x").collate("latin1_german2_ci") == "Müller").compile(
+ ... dialect=mysql.dialect()
+ ... )
+ ... )
(x COLLATE latin1_german2_ci) = %s
>>> from sqlalchemy import literal
- >>> print((literal('Müller').collate('latin1_german2_ci') == column('x')).compile(dialect=mysql.dialect()))
+ >>> print(
+ ... (literal("Müller").collate("latin1_german2_ci") == column("x")).compile(
+ ... dialect=mysql.dialect()
+ ... )
+ ... )
(%s COLLATE latin1_german2_ci) = x
..
* :meth:`_sql.ColumnOperators.__add__`, :meth:`_sql.ColumnOperators.__radd__` (Python "``+``" operator)::
- >>> print(column('x') + 5)
+ >>> print(column("x") + 5)
x + :x_1
- >>> print(5 + column('x'))
+ >>> print(5 + column("x"))
:x_1 + x
..
* :meth:`_sql.ColumnOperators.__sub__`, :meth:`_sql.ColumnOperators.__rsub__` (Python "``-``" operator)::
- >>> print(column('x') - 5)
+ >>> print(column("x") - 5)
x - :x_1
- >>> print(5 - column('x'))
+ >>> print(5 - column("x"))
:x_1 - x
..
* :meth:`_sql.ColumnOperators.__mul__`, :meth:`_sql.ColumnOperators.__rmul__` (Python "``*``" operator)::
- >>> print(column('x') * 5)
+ >>> print(column("x") * 5)
x * :x_1
- >>> print(5 * column('x'))
+ >>> print(5 * column("x"))
:x_1 * x
..
* :meth:`_sql.ColumnOperators.__div__`, :meth:`_sql.ColumnOperators.__rdiv__` (Python "``/``" operator)::
- >>> print(column('x') / 5)
+ >>> print(column("x") / 5)
x / :x_1
- >>> print(5 / column('x'))
+ >>> print(5 / column("x"))
:x_1 / x
..
* :meth:`_sql.ColumnOperators.__mod__`, :meth:`_sql.ColumnOperators.__rmod__` (Python "``%``" operator)::
- >>> print(column('x') % 5)
+ >>> print(column("x") % 5)
x % :x_1
- >>> print(5 % column('x'))
+ >>> print(5 % column("x"))
:x_1 % x
..
:meth:`_sql.Update.where` and :meth:`_sql.Delete.where`::
>>> print(
- ... select(address_table.c.email_address).
- ... where(user_table.c.name == 'squidward').
- ... where(address_table.c.user_id == user_table.c.id)
- ... )
+ ... select(address_table.c.email_address)
+ ... .where(user_table.c.name == "squidward")
+ ... .where(address_table.c.user_id == user_table.c.id)
+ ... )
SELECT address.email_address
FROM address, user_account
WHERE user_account.name = :name_1 AND address.user_id = user_account.id
:meth:`_sql.Select.where`, :meth:`_sql.Update.where` and :meth:`_sql.Delete.where` also accept multiple expressions with the same effect::
>>> print(
- ... select(address_table.c.email_address).
- ... where(
- ... user_table.c.name == 'squidward',
- ... address_table.c.user_id == user_table.c.id
- ... )
- ... )
+ ... select(address_table.c.email_address).where(
+ ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id
+ ... )
+ ... )
SELECT address.email_address
FROM address, user_account
WHERE user_account.name = :name_1 AND address.user_id = user_account.id
>>> from sqlalchemy import and_, or_
>>> print(
- ... select(address_table.c.email_address).
- ... where(
+ ... select(address_table.c.email_address).where(
... and_(
- ... or_(user_table.c.name == 'squidward', user_table.c.name == 'sandy'),
- ... address_table.c.user_id == user_table.c.id
+ ... or_(user_table.c.name == "squidward", user_table.c.name == "sandy"),
+ ... address_table.c.user_id == user_table.c.id,
... )
... )
... )
typically invert the operator in a boolean expression::
>>> from sqlalchemy import not_
- >>> print(not_(column('x') == 5))
+ >>> print(not_(column("x") == 5))
x != :x_1
It also may apply a keyword such as ``NOT`` when appropriate::
>>> from sqlalchemy import Boolean
- >>> print(not_(column('x', Boolean)))
+ >>> print(not_(column("x", Boolean)))
NOT x
The Python binary ``&`` operator is overloaded to behave the same
as :func:`_sql.and_` (note parentheses around the two operands)::
- >>> print((column('x') == 5) & (column('y') == 10))
+ >>> print((column("x") == 5) & (column("y") == 10))
x = :x_1 AND y = :y_1
..
The Python binary ``|`` operator is overloaded to behave the same
as :func:`_sql.or_` (note parentheses around the two operands)::
- >>> print((column('x') == 5) | (column('y') == 10))
+ >>> print((column("x") == 5) | (column("y") == 10))
x = :x_1 OR y = :y_1
..
as :func:`_sql.not_`, either inverting the existing operator, or
applying the ``NOT`` keyword to the expression as a whole::
- >>> print(~(column('x') == 5))
+ >>> print(~(column("x") == 5))
x != :x_1
>>> from sqlalchemy import Boolean
- >>> print(~column('x', Boolean))
+ >>> print(~column("x", Boolean))
NOT x
..
.. Setup code, not for display
>>> conn.close()
- ROLLBACK
\ No newline at end of file
+ ROLLBACK
``pool_size``, ``max_overflow``, ``pool_recycle`` and
``pool_timeout``. For example::
- engine = create_engine('postgresql://me@localhost/mydb',
- pool_size=20, max_overflow=0)
+ engine = create_engine("postgresql://me@localhost/mydb", pool_size=20, max_overflow=0)
In the case of SQLite, the :class:`.SingletonThreadPool` or
:class:`.NullPool` is selected by the dialect to provide
:class:`.QueuePool` with SQLite::
from sqlalchemy.pool import QueuePool
- engine = create_engine('sqlite:///file.db', poolclass=QueuePool)
+
+ engine = create_engine("sqlite:///file.db", poolclass=QueuePool)
Disabling pooling using :class:`.NullPool`::
from sqlalchemy.pool import NullPool
+
engine = create_engine(
- 'postgresql+psycopg2://scott:tiger@localhost/test',
- poolclass=NullPool)
+ "postgresql+psycopg2://scott:tiger@localhost/test", poolclass=NullPool
+ )
Using a Custom Connection Function
----------------------------------
import sqlalchemy.pool as pool
import psycopg2
+
def getconn():
- c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test')
+ c = psycopg2.connect(user="ed", host="127.0.0.1", dbname="test")
return c
+
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5)
DBAPI connections can then be procured from the pool using the :meth:`.Pool.connect` method.
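
A minimal sketch of that usage, assuming the ``mypool`` created above; the
object returned is a proxied DBAPI connection::

    # check out a DBAPI connection from the pool
    conn = mypool.connect()

    cursor_obj = conn.cursor()
    cursor_obj.execute("select 1")
    cursor_obj.close()

    # "closing" the connection returns it to the pool
    conn.close()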
some_engine = create_engine(...)
+
@event.listens_for(some_engine, "engine_connect")
def ping_connection(connection, branch):
if branch:
illustrated by the code example below::
from sqlalchemy import create_engine, exc
+
e = create_engine(...)
c = e.connect()
period of time::
from sqlalchemy import create_engine
+
e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600)
Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced,
basically whether or not it's desirable for the pool to keep a full set of
connections ready to go even during idle periods::
- engine = create_engine(
- "postgreql://", pool_use_lifo=True, pool_pre_ping=True)
+ engine = create_engine("postgreql://", pool_use_lifo=True, pool_pre_ping=True)
Above, we also make use of the :paramref:`_sa.create_engine.pool_pre_ping` flag
so that connections which are closed from the server side are gracefully
more than once::
from sqlalchemy.pool import NullPool
- engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool)
+ engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool)
2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`,
passing the :paramref:`.Engine.dispose.close` parameter with a value of
engine = create_engine("mysql+mysqldb://user:pass@host/dbname")
+
def run_in_process(some_data_record):
with engine.connect() as conn:
conn.execute(text("..."))
+
def initializer():
"""ensure the parent proc's database connections are not touched
- in the new connection pool"""
+ in the new connection pool"""
engine.dispose(close=False)
+
with Pool(10, initializer=initializer) as p:
p.map(run_in_process, data)
-
.. versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close`
parameter to allow the replacement of a connection pool in a child
process without interfering with the connections used by the parent
engine = create_engine("mysql://user:pass@host/dbname")
+
def run_in_process():
with engine.connect() as conn:
conn.execute(text("..."))
+
# before process starts, ensure engine.dispose() is called
engine.dispose()
p = Process(target=run_in_process)
p.start()
engine = create_engine("...")
+
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
- connection_record.info['pid'] = os.getpid()
+ connection_record.info["pid"] = os.getpid()
+
@event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
- if connection_record.info['pid'] != pid:
+ if connection_record.info["pid"] != pid:
connection_record.dbapi_connection = connection_proxy.dbapi_connection = None
raise exc.DisconnectionError(
- "Connection record belongs to pid %s, "
- "attempting to check out in pid %s" %
- (connection_record.info['pid'], pid)
+ "Connection record belongs to pid %s, "
+ "attempting to check out in pid %s" % (connection_record.info["pid"], pid)
)
Above, we use an approach similar to that described in
most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData`
object, and the ``autoload_with`` argument::
- >>> messages = Table('messages', metadata_obj, autoload_with=engine)
+ >>> messages = Table("messages", metadata_obj, autoload_with=engine)
>>> [c.name for c in messages.columns]
['message_id', 'message_name', 'date']
``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the
effect that the ``shopping_carts`` table will also be loaded::
- >>> shopping_cart_items = Table('shopping_cart_items', metadata_obj, autoload_with=engine)
+ >>> shopping_cart_items = Table("shopping_cart_items", metadata_obj, autoload_with=engine)
- >>> 'shopping_carts' in metadata_obj.tables:
+ >>> "shopping_carts" in metadata_obj.tables
True
already exists with the given name. For example, below we can access the
already generated ``shopping_carts`` table just by naming it::
- shopping_carts = Table('shopping_carts', metadata_obj)
+ shopping_carts = Table("shopping_carts", metadata_obj)
Of course, it's a good idea to use ``autoload_with=engine`` with the above table
regardless. This is so that the table's attributes will be loaded if they have
tables; this is handy for specifying custom datatypes, constraints such as
primary keys that may not be configured within the database, etc.::
- >>> mytable = Table('mytable', metadata_obj,
- ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key
- ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode
- ... # additional Column objects which require no change are reflected normally
- ... autoload_with=some_engine)
+ >>> mytable = Table(
+ ... "mytable",
+ ... metadata_obj,
+ ... Column(
+ ... "id", Integer, primary_key=True
+ ... ), # override reflected 'id' to have primary key
+ ... Column("mydata", Unicode(50)), # override reflected 'mydata' to be Unicode
+ ... # additional Column objects which require no change are reflected normally
+ ... autoload_with=some_engine,
+ ... )
.. seealso::
Use the "override" technique for this, specifying explicitly those columns
which are part of the primary key or have foreign key constraints::
- my_view = Table("some_view", metadata,
- Column("view_id", Integer, primary_key=True),
- Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
- autoload_with=engine
+ my_view = Table(
+ "some_view",
+ metadata,
+ Column("view_id", Integer, primary_key=True),
+ Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
+ autoload_with=engine,
)
Reflecting All Tables at Once
metadata_obj = MetaData()
metadata_obj.reflect(bind=someengine)
- users_table = metadata_obj.tables['users']
- addresses_table = metadata_obj.tables['addresses']
+ users_table = metadata_obj.tables["users"]
+ addresses_table = metadata_obj.tables["addresses"]
``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database::
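
A sketch of that pattern, assuming ``someengine`` from the example above;
iterating :attr:`_schema.MetaData.sorted_tables` in reverse deletes rows from
child tables before their parents::

    metadata_obj = MetaData()
    metadata_obj.reflect(bind=someengine)

    with someengine.begin() as conn:
        for table in reversed(metadata_obj.sorted_tables):
            conn.execute(table.delete())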
schema will be reflected, and they will be populated as schema-qualified
with that name::
- >>> metadata_obj.tables['project.messages']
+ >>> metadata_obj.tables["project.messages"]
Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
Similarly, an individual :class:`_schema.Table` object that includes the
database schema, overriding any default schema that may have been configured on the
owning :class:`_schema.MetaData` collection::
- >>> messages = Table('messages', metadata_obj, schema="project", autoload_with=someengine)
+ >>> messages = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
>>> messages
Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
>>> # reflect in non-schema qualified fashion
>>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine)
>>> # reflect in schema qualified fashion
- >>> messages_table_2 = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
+ >>> messages_table_2 = Table(
+ ... "messages", metadata_obj, schema="project", autoload_with=someengine
+ ... )
>>> # two different objects
>>> messages_table_1 is messages_table_2
False
qualified fashion::
>>> # reflect "messages" in a schema qualified fashion
- >>> messages_table_1 = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
+ >>> messages_table_1 = Table(
+ ... "messages", metadata_obj, schema="project", autoload_with=someengine
+ ... )
The above ``messages_table_1`` will refer to ``projects`` also in a schema
qualified fashion. This "projects" table will be reflected automatically by
from sqlalchemy import create_engine
from sqlalchemy import inspect
- engine = create_engine('...')
+
+ engine = create_engine("...")
insp = inspect(engine)
print(insp.get_table_names())
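
Other :class:`.Inspector` methods follow the same pattern; for example, a
sketch listing column information for a hypothetical ``users`` table::

    print(insp.get_columns("users"))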
.. sourcecode:: pycon+sql
>>> from sqlalchemy import create_engine
- >>> engine = create_engine('sqlite:///:memory:', echo=True)
+ >>> engine = create_engine("sqlite:///:memory:", echo=True)
The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is
accomplished via Python's standard ``logging`` module. With it enabled, we'll
>>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
>>> metadata_obj = MetaData()
- >>> users = Table('users', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String),
- ... Column('fullname', String),
+ >>> users = Table(
+ ... "users",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String),
+ ... Column("fullname", String),
... )
- >>> addresses = Table('addresses', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', None, ForeignKey('users.id')),
- ... Column('email_address', String, nullable=False)
- ... )
+ >>> addresses = Table(
+ ... "addresses",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", None, ForeignKey("users.id")),
+ ... Column("email_address", String, nullable=False),
+ ... )
All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as
how to create them from an existing database automatically, is described in
issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as
below::
- Column('name', String(50))
+ Column("name", String(50))
The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields
available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by
without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct::
from sqlalchemy import Sequence
- Column('id', Integer, Sequence('user_id_seq'), primary_key=True)
+
+ Column("id", Integer, Sequence("user_id_seq"), primary_key=True)
A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore::
- users = Table('users', metadata_obj,
- Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(50))
+ users = Table(
+ "users",
+ metadata_obj,
+ Column("id", Integer, Sequence("user_id_seq"), primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(50)),
)
We include this more verbose :class:`_schema.Table` construct separately
table. This can be limited by using the ``values()`` method, which establishes
the VALUES clause of the INSERT explicitly::
- >>> ins = users.insert().values(name='jack', fullname='Jack Jones')
+ >>> ins = users.insert().values(name="jack", fullname="Jack Jones")
>>> str(ins)
'INSERT INTO users (name, fullname) VALUES (:name, :fullname)'
.. sourcecode:: pycon+sql
>>> ins = users.insert()
- >>> conn.execute(ins, {"id": 2, "name":"wendy", "fullname": "Wendy Williams"})
+ >>> conn.execute(ins, {"id": 2, "name": "wendy", "fullname": "Wendy Williams"})
{opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?)
[...] (2, 'wendy', 'Wendy Williams')
COMMIT
.. sourcecode:: pycon+sql
- >>> conn.execute(addresses.insert(), [
- ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'},
- ... {'user_id': 1, 'email_address' : 'jack@msn.com'},
- ... {'user_id': 2, 'email_address' : 'www@www.org'},
- ... {'user_id': 2, 'email_address' : 'wendy@aol.com'},
- ... ])
+ >>> conn.execute(
+ ... addresses.insert(),
+ ... [
+ ... {"user_id": 1, "email_address": "jack@yahoo.com"},
+ ... {"user_id": 1, "email_address": "jack@msn.com"},
+ ... {"user_id": 2, "email_address": "www@www.org"},
+ ... {"user_id": 2, "email_address": "wendy@aol.com"},
+ ... ],
+ ... )
{opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?)
[...] ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com'))
COMMIT
[...] ()
{stop}>>> row = result.fetchone()
- >>> print("name:", row._mapping['name'], "; fullname:", row._mapping['fullname'])
+ >>> print("name:", row._mapping["name"], "; fullname:", row._mapping["fullname"])
name: jack ; fullname: Jack Jones
.. deprecated:: 1.4
.. sourcecode:: pycon+sql
{sql}>>> for row in conn.execute(s):
- ... print("name:", row._mapping[users.c.name], "; fullname:", row._mapping[users.c.fullname])
+ ... print(
+ ... "name:",
+ ... row._mapping[users.c.name],
+ ... "; fullname:",
+ ... row._mapping[users.c.fullname],
+ ... )
SELECT users.id, users.name, users.fullname
FROM users
[...] ()
users.name IS NULL
>>> # reverse works too
- >>> print('fred' > users.c.name)
+ >>> print("fred" > users.c.name)
users.name < :name_1
If we add two integer columns together, we get an addition expression:
.. sourcecode:: pycon+sql
- >>> print((users.c.name + users.c.fullname).
- ... compile(bind=create_engine('mysql://'))) # doctest: +SKIP
+ >>> print(
+ ... (users.c.name + users.c.fullname).compile(bind=create_engine("mysql://"))
+ ... ) # doctest: +SKIP
concat(users.name, users.fullname)
The above illustrates the SQL that's generated for an
.. sourcecode:: pycon+sql
- >>> print(users.c.name.op('tiddlywinks')('foo'))
+ >>> print(users.c.name.op("tiddlywinks")("foo"))
users.name tiddlywinks :name_1
This function can also be used to make bitwise operators explicit. For example::
- somecolumn.op('&')(0xff)
+ somecolumn.op("&")(0xFF)
is a bitwise AND of the value in ``somecolumn``.
normally expected, using :func:`.type_coerce`::
from sqlalchemy import type_coerce
- expr = type_coerce(somecolumn.op('-%>')('foo'), MySpecialType())
- stmt = select(expr)
+ expr = type_coerce(somecolumn.op("-%>")("foo"), MySpecialType())
+ stmt = select(expr)
For boolean operators, use the :meth:`.Operators.bool_op` method, which
will ensure that the return type of the expression is handled as boolean::
- somecolumn.bool_op('-->')('some value')
-
+ somecolumn.bool_op("-->")("some value")
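
For example, a sketch of the SQL rendered for a hypothetical ``-->`` operator
applied to a plain ``column()`` construct::

    >>> print(column("x").bool_op("-->")("some value"))
    x --> :x_1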
Commonly Used Operators
-------------------------
* :meth:`equals <.ColumnOperators.__eq__>`::
- statement.where(users.c.name == 'ed')
+ statement.where(users.c.name == "ed")
* :meth:`not equals <.ColumnOperators.__ne__>`::
- statement.where(users.c.name != 'ed')
+ statement.where(users.c.name != "ed")
* :meth:`LIKE <.ColumnOperators.like>`::
* :meth:`IN <.ColumnOperators.in_>`::
- statement.where(users.c.name.in_(['ed', 'wendy', 'jack']))
+ statement.where(users.c.name.in_(["ed", "wendy", "jack"]))
# works with Select objects too:
- statement.where.filter(users.c.name.in_(
- select(users.c.name).where(users.c.name.like('%ed%'))
- ))
+ statement.where(
+     users.c.name.in_(select(users.c.name).where(users.c.name.like("%ed%")))
+ )
# use tuple_() for composite (multi-column) queries
from sqlalchemy import tuple_
+
statement.where(
- tuple_(users.c.name, users.c.nickname).\
- in_([('ed', 'edsnickname'), ('wendy', 'windy')])
+ tuple_(users.c.name, users.c.nickname).in_(
+ [("ed", "edsnickname"), ("wendy", "windy")]
+ )
)
* :meth:`NOT IN <.ColumnOperators.not_in>`::
- statement.where(~users.c.name.in_(['ed', 'wendy', 'jack']))
+ statement.where(~users.c.name.in_(["ed", "wendy", "jack"]))
* :meth:`IS NULL <.ColumnOperators.is_>`::
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import and_, or_, not_
- >>> print(and_(
- ... users.c.name.like('j%'),
+ >>> print(
+ ... and_(
+ ... users.c.name.like("j%"),
... users.c.id == addresses.c.user_id,
... or_(
- ... addresses.c.email_address == 'wendy@aol.com',
- ... addresses.c.email_address == 'jack@yahoo.com'
+ ... addresses.c.email_address == "wendy@aol.com",
+ ... addresses.c.email_address == "jack@yahoo.com",
... ),
- ... not_(users.c.id > 5)
- ... )
- ... )
+ ... not_(users.c.id > 5),
+ ... )
+ ... )
users.name LIKE :name_1 AND users.id = addresses.user_id AND
(addresses.email_address = :email_address_1
OR addresses.email_address = :email_address_2)
.. sourcecode:: pycon+sql
- >>> print(users.c.name.like('j%') & (users.c.id == addresses.c.user_id) &
- ... (
- ... (addresses.c.email_address == 'wendy@aol.com') | \
- ... (addresses.c.email_address == 'jack@yahoo.com')
- ... ) \
- ... & ~(users.c.id>5)
+ >>> print(
+ ... users.c.name.like("j%")
+ ... & (users.c.id == addresses.c.user_id)
+ ... & (
+ ... (addresses.c.email_address == "wendy@aol.com")
+ ... | (addresses.c.email_address == "jack@yahoo.com")
+ ... )
+ ... & ~(users.c.id > 5)
... )
users.name LIKE :name_1 AND users.id = addresses.user_id AND
(addresses.email_address = :email_address_1
.. sourcecode:: pycon+sql
- >>> s = select((users.c.fullname +
- ... ", " + addresses.c.email_address).
- ... label('title')).\
- ... where(
- ... and_(
- ... users.c.id == addresses.c.user_id,
- ... users.c.name.between('m', 'z'),
- ... or_(
- ... addresses.c.email_address.like('%@aol.com'),
- ... addresses.c.email_address.like('%@msn.com')
- ... )
- ... )
- ... )
+ >>> s = select((users.c.fullname + ", " + addresses.c.email_address).label("title")).where(
+ ... and_(
+ ... users.c.id == addresses.c.user_id,
+ ... users.c.name.between("m", "z"),
+ ... or_(
+ ... addresses.c.email_address.like("%@aol.com"),
+ ... addresses.c.email_address.like("%@msn.com"),
+ ... ),
+ ... )
+ ... )
>>> conn.execute(s).fetchall()
{opensql}SELECT users.fullname || ? || addresses.email_address AS title
FROM users, addresses
.. sourcecode:: pycon+sql
- >>> s = select((users.c.fullname +
- ... ", " + addresses.c.email_address).
- ... label('title')).\
- ... where(users.c.id == addresses.c.user_id).\
- ... where(users.c.name.between('m', 'z')).\
- ... where(
- ... or_(
- ... addresses.c.email_address.like('%@aol.com'),
- ... addresses.c.email_address.like('%@msn.com')
- ... )
- ... )
+ >>> s = (
+ ... select((users.c.fullname + ", " + addresses.c.email_address).label("title"))
+ ... .where(users.c.id == addresses.c.user_id)
+ ... .where(users.c.name.between("m", "z"))
+ ... .where(
+ ... or_(
+ ... addresses.c.email_address.like("%@aol.com"),
+ ... addresses.c.email_address.like("%@msn.com"),
+ ... )
+ ... )
+ ... )
>>> conn.execute(s).fetchall()
{opensql}SELECT users.fullname || ? || addresses.email_address AS title
FROM users, addresses
>>> from sqlalchemy.sql import text
>>> s = text(
... "SELECT users.fullname || ', ' || addresses.email_address AS title "
- ... "FROM users, addresses "
- ... "WHERE users.id = addresses.user_id "
- ... "AND users.name BETWEEN :x AND :y "
- ... "AND (addresses.email_address LIKE :e1 "
- ... "OR addresses.email_address LIKE :e2)")
- >>> conn.execute(s, {"x":"m", "y":"z", "e1":"%@aol.com", "e2":"%@msn.com"}).fetchall()
+ ... "FROM users, addresses "
+ ... "WHERE users.id = addresses.user_id "
+ ... "AND users.name BETWEEN :x AND :y "
+ ... "AND (addresses.email_address LIKE :e1 "
+ ... "OR addresses.email_address LIKE :e2)"
+ ... )
+ >>> conn.execute(s, {"x": "m", "y": "z", "e1": "%@aol.com", "e2": "%@msn.com"}).fetchall()
{opensql}SELECT users.fullname || ', ' || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
j = stmt.join(addresses, stmt.c.id == addresses.c.user_id)
- new_stmt = select(stmt.c.id, addresses.c.id).\
- select_from(j).where(stmt.c.name == 'x')
+ new_stmt = select(stmt.c.id, addresses.c.id).select_from(j).where(stmt.c.name == "x")
The positional form of :meth:`_expression.TextClause.columns` is particularly useful
when relating textual SQL to existing Core or ORM models, because we can use
.. sourcecode:: pycon+sql
- >>> stmt = text("SELECT users.id, addresses.id, users.id, "
+ >>> stmt = text(
+ ... "SELECT users.id, addresses.id, users.id, "
... "users.name, addresses.email_address AS email "
... "FROM users JOIN addresses ON users.id=addresses.user_id "
- ... "WHERE users.id = 1").columns(
- ... users.c.id,
- ... addresses.c.id,
- ... addresses.c.user_id,
- ... users.c.name,
- ... addresses.c.email_address
- ... )
+ ... "WHERE users.id = 1"
+ ... ).columns(
+ ... users.c.id,
+ ... addresses.c.id,
+ ... addresses.c.user_id,
+ ... users.c.name,
+ ... addresses.c.email_address,
+ ... )
>>> result = conn.execute(stmt)
{opensql}SELECT users.id, addresses.id, users.id, users.name,
addresses.email_address AS email
.. sourcecode:: pycon+sql
- >>> s = select(
- ... text("users.fullname || ', ' || addresses.email_address AS title")
- ... ).\
- ... where(
- ... and_(
- ... text("users.id = addresses.user_id"),
- ... text("users.name BETWEEN 'm' AND 'z'"),
- ... text(
- ... "(addresses.email_address LIKE :x "
- ... "OR addresses.email_address LIKE :y)")
- ... )
- ... ).select_from(text('users, addresses'))
+ >>> s = (
+ ... select(text("users.fullname || ', ' || addresses.email_address AS title"))
+ ... .where(
+ ... and_(
+ ... text("users.id = addresses.user_id"),
+ ... text("users.name BETWEEN 'm' AND 'z'"),
+ ... text(
+ ... "(addresses.email_address LIKE :x "
+ ... "OR addresses.email_address LIKE :y)"
+ ... ),
+ ... )
+ ... )
+ ... .select_from(text("users, addresses"))
+ ... )
>>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall()
{opensql}SELECT users.fullname || ', ' || addresses.email_address AS title
FROM users, addresses
>>> from sqlalchemy import select, and_, text, String
>>> from sqlalchemy.sql import table, literal_column
- >>> s = select(
- ... literal_column("users.fullname", String) +
- ... ', ' +
- ... literal_column("addresses.email_address").label("title")
- ... ).\
- ... where(
- ... and_(
- ... literal_column("users.id") == literal_column("addresses.user_id"),
- ... text("users.name BETWEEN 'm' AND 'z'"),
- ... text(
- ... "(addresses.email_address LIKE :x OR "
- ... "addresses.email_address LIKE :y)")
- ... )
- ... ).select_from(table('users')).select_from(table('addresses'))
-
- >>> conn.execute(s, {"x":"%@aol.com", "y":"%@msn.com"}).fetchall()
+ >>> s = (
+ ... select(
+ ... literal_column("users.fullname", String)
+ ... + ", "
+ ... + literal_column("addresses.email_address").label("title")
+ ... )
+ ... .where(
+ ... and_(
+ ... literal_column("users.id") == literal_column("addresses.user_id"),
+ ... text("users.name BETWEEN 'm' AND 'z'"),
+ ... text(
+ ... "(addresses.email_address LIKE :x OR "
+ ... "addresses.email_address LIKE :y)"
+ ... ),
+ ... )
+ ... )
+ ... .select_from(table("users"))
+ ... .select_from(table("addresses"))
+ ... )
+
+ >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall()
{opensql}SELECT users.fullname || ? || addresses.email_address AS anon_1
FROM users, addresses
WHERE users.id = addresses.user_id
.. sourcecode:: pycon+sql
>>> from sqlalchemy import func
- >>> stmt = select(
- ... addresses.c.user_id,
- ... func.count(addresses.c.id).label('num_addresses')).\
- ... group_by("user_id").order_by("user_id", "num_addresses")
+ >>> stmt = (
+ ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses"))
+ ... .group_by("user_id")
+ ... .order_by("user_id", "num_addresses")
+ ... )
{sql}>>> conn.execute(stmt).fetchall()
SELECT addresses.user_id, count(addresses.id) AS num_addresses
.. sourcecode:: pycon+sql
>>> from sqlalchemy import func, desc
- >>> stmt = select(
- ... addresses.c.user_id,
- ... func.count(addresses.c.id).label('num_addresses')).\
- ... group_by("user_id").order_by("user_id", desc("num_addresses"))
+ >>> stmt = (
+ ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses"))
+ ... .group_by("user_id")
+ ... .order_by("user_id", desc("num_addresses"))
+ ... )
{sql}>>> conn.execute(stmt).fetchall()
SELECT addresses.user_id, count(addresses.id) AS num_addresses
.. sourcecode:: pycon+sql
>>> u1a, u1b = users.alias(), users.alias()
- >>> stmt = select(u1a, u1b).\
- ... where(u1a.c.name > u1b.c.name).\
- ... order_by(u1a.c.name) # using "name" here would be ambiguous
+ >>> stmt = (
+ ... select(u1a, u1b).where(u1a.c.name > u1b.c.name).order_by(u1a.c.name)
+ ... ) # using "name" here would be ambiguous
{sql}>>> conn.execute(stmt).fetchall()
SELECT users_1.id, users_1.name, users_1.fullname, users_2.id AS id_1,
>>> a1 = addresses.alias()
>>> a2 = addresses.alias()
- >>> s = select(users).\
- ... where(and_(
- ... users.c.id == a1.c.user_id,
- ... users.c.id == a2.c.user_id,
- ... a1.c.email_address == 'jack@msn.com',
- ... a2.c.email_address == 'jack@yahoo.com'
- ... ))
+ >>> s = select(users).where(
+ ... and_(
+ ... users.c.id == a1.c.user_id,
+ ... users.c.id == a2.c.user_id,
+ ... a1.c.email_address == "jack@msn.com",
+ ... a2.c.email_address == "jack@yahoo.com",
+ ... )
+ ... )
>>> conn.execute(s).fetchall()
{opensql}SELECT users.id, users.name, users.fullname
FROM users, addresses AS addresses_1, addresses AS addresses_2
the purposes of debugging, it can be specified by passing a string name
to the :meth:`_expression.FromClause.alias` method::
- >>> a1 = addresses.alias('a1')
+ >>> a1 = addresses.alias("a1")
SELECT-oriented constructs which extend from :class:`_expression.SelectBase` may be turned
into aliased subqueries using the :meth:`_expression.SelectBase.subquery` method, which
.. sourcecode:: pycon+sql
- >>> print(users.join(addresses,
- ... addresses.c.email_address.like(users.c.name + '%')
- ... )
- ... )
+ >>> print(users.join(addresses, addresses.c.email_address.like(users.c.name + "%")))
users JOIN addresses ON addresses.email_address LIKE users.name || :name_1
When we create a :func:`_expression.select` construct, SQLAlchemy looks around at the
.. sourcecode:: pycon+sql
>>> s = select(users.c.fullname).select_from(
- ... users.join(addresses,
- ... addresses.c.email_address.like(users.c.name + '%'))
- ... )
+ ... users.join(addresses, addresses.c.email_address.like(users.c.name + "%"))
+ ... )
{sql}>>> conn.execute(s).fetchall()
SELECT users.fullname
FROM users JOIN addresses ON addresses.email_address LIKE users.name || ?
.. sourcecode:: pycon+sql
- >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == 'wendy').cte()
- >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id)
+ >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == "wendy").cte()
+ >>> stmt = (
+ ... select(addresses)
+ ... .where(addresses.c.user_id == users_cte.c.id)
+ ... .order_by(addresses.c.id)
+ ... )
>>> conn.execute(stmt).fetchall()
{opensql}WITH anon_1 AS
(SELECT users.id AS id, users.name AS name
>>> users_cte = select(users.c.id, users.c.name).cte(recursive=True)
>>> users_recursive = users_cte.alias()
- >>> users_cte = users_cte.union(select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id))
- >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id)
+ >>> users_cte = users_cte.union(
+ ... select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id)
+ ... )
+ >>> stmt = (
+ ... select(addresses)
+ ... .where(addresses.c.user_id == users_cte.c.id)
+ ... .order_by(addresses.c.id)
+ ... )
>>> conn.execute(stmt).fetchall()
{opensql}WITH RECURSIVE anon_1(id, name) AS
(SELECT users.id AS id, users.name AS name
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import bindparam
- >>> s = users.select().where(users.c.name == bindparam('username'))
+ >>> s = users.select().where(users.c.name == bindparam("username"))
{sql}>>> conn.execute(s, {"username": "wendy"}).fetchall()
SELECT users.id, users.name, users.fullname
FROM users
.. sourcecode:: pycon+sql
- >>> s = users.select().where(users.c.name.like(bindparam('username', type_=String) + text("'%'")))
+ >>> s = users.select().where(
+ ... users.c.name.like(bindparam("username", type_=String) + text("'%'"))
+ ... )
{sql}>>> conn.execute(s, {"username": "wendy"}).fetchall()
SELECT users.id, users.name, users.fullname
FROM users
.. sourcecode:: pycon+sql
- >>> s = select(users, addresses).\
- ... where(
- ... or_(
- ... users.c.name.like(
- ... bindparam('name', type_=String) + text("'%'")),
- ... addresses.c.email_address.like(
- ... bindparam('name', type_=String) + text("'@%'"))
- ... )
- ... ).\
- ... select_from(users.outerjoin(addresses)).\
- ... order_by(addresses.c.id)
+ >>> s = (
+ ... select(users, addresses)
+ ... .where(
+ ... or_(
+ ... users.c.name.like(bindparam("name", type_=String) + text("'%'")),
+ ... addresses.c.email_address.like(
+ ... bindparam("name", type_=String) + text("'@%'")
+ ... ),
+ ... )
+ ... )
+ ... .select_from(users.outerjoin(addresses))
+ ... .order_by(addresses.c.id)
+ ... )
{sql}>>> conn.execute(s, {"name": "jack"}).fetchall()
SELECT users.id, users.name, users.fullname, addresses.id AS id_1,
addresses.user_id, addresses.email_address
>>> print(func.now())
now()
- >>> print(func.concat('x', 'y'))
+ >>> print(func.concat("x", "y"))
concat(:concat_1, :concat_2)
By "generates", we mean that **any** SQL function is created based on the word
stmt = select(func.date(some_table.c.date_string, type_=Date))
-
Functions are most typically used in the columns clause of a select statement,
and can also be labeled as well as given a type. Labeling a function is
recommended so that the result can be targeted in a result row based on a
.. sourcecode:: pycon+sql
>>> conn.execute(
- ... select(
- ... func.max(addresses.c.email_address, type_=String).
- ... label('maxemail')
- ... )
- ... ).scalar()
+ ... select(func.max(addresses.c.email_address, type_=String).label("maxemail"))
+ ... ).scalar()
{opensql}SELECT max(addresses.email_address) AS maxemail
FROM addresses
[...] ()
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import column
- >>> calculate = select(column('q'), column('z'), column('r')).\
- ... select_from(
- ... func.calculate(
- ... bindparam('x'),
- ... bindparam('y')
- ... )
- ... )
+ >>> calculate = select(column("q"), column("z"), column("r")).select_from(
+ ... func.calculate(bindparam("x"), bindparam("y"))
+ ... )
>>> calc = calculate.alias()
>>> print(select(users).where(users.c.id > calc.c.z))
SELECT users.id, users.name, users.fullname
.. sourcecode:: pycon+sql
- >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45)
- >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12)
- >>> s = select(users).\
- ... where(users.c.id.between(calc1.c.z, calc2.c.z))
+ >>> calc1 = calculate.alias("c1").unique_params(x=17, y=45)
+ >>> calc2 = calculate.alias("c2").unique_params(x=5, y=12)
+ >>> s = select(users).where(users.c.id.between(calc1.c.z, calc2.c.z))
>>> print(s)
SELECT users.id, users.name, users.fullname
FROM users,
(SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2
WHERE users.id BETWEEN c1.z AND c2.z
- >>> s.compile().params # doctest: +SKIP
+ >>> s.compile().params # doctest: +SKIP
{'x_2': 5, 'y_2': 12, 'y_1': 45, 'x_1': 17}
.. seealso::
:data:`~.expression.func`, can be turned into a "window function", that is an
OVER clause, using the :meth:`.FunctionElement.over` method::
- >>> s = select(
- ... users.c.id,
- ... func.row_number().over(order_by=users.c.name)
- ... )
+ >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name))
>>> print(s)
SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1
FROM users
either the :paramref:`.expression.over.rows` or
:paramref:`.expression.over.range` parameters::
- >>> s = select(
- ... users.c.id,
- ... func.row_number().over(
- ... order_by=users.c.name,
- ... rows=(-2, None))
- ... )
+ >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name, rows=(-2, None)))
>>> print(s)
SELECT users.id, row_number() OVER
(ORDER BY users.name ROWS BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) AS anon_1
>>> from sqlalchemy import JSON
>>> from sqlalchemy import type_coerce
>>> from sqlalchemy.dialects import mysql
- >>> s = select(
- ... type_coerce(
- ... {'some_key': {'foo': 'bar'}}, JSON
- ... )['some_key']
- ... )
+ >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"])
>>> print(s.compile(dialect=mysql.dialect()))
SELECT JSON_EXTRACT(%s, %s) AS anon_1
>>> from sqlalchemy.sql import union
>>> u = union(
- ... addresses.select().
- ... where(addresses.c.email_address == 'foo@bar.com'),
- ... addresses.select().
- ... where(addresses.c.email_address.like('%@yahoo.com')),
+ ... addresses.select().where(addresses.c.email_address == "foo@bar.com"),
+ ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")),
... ).order_by(addresses.c.email_address)
{sql}>>> conn.execute(u).fetchall()
>>> from sqlalchemy.sql import except_
>>> u = except_(
- ... addresses.select().
- ... where(addresses.c.email_address.like('%@%.com')),
- ... addresses.select().
- ... where(addresses.c.email_address.like('%@msn.com'))
+ ... addresses.select().where(addresses.c.email_address.like("%@%.com")),
+ ... addresses.select().where(addresses.c.email_address.like("%@msn.com")),
... )
{sql}>>> conn.execute(u).fetchall()
.. sourcecode:: pycon+sql
>>> u = except_(
- ... union(
- ... addresses.select().
- ... where(addresses.c.email_address.like('%@yahoo.com')),
- ... addresses.select().
- ... where(addresses.c.email_address.like('%@msn.com'))
- ... ).subquery().select(), # apply subquery here
- ... addresses.select().where(addresses.c.email_address.like('%@msn.com'))
+ ... union(
+ ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")),
+ ... addresses.select().where(addresses.c.email_address.like("%@msn.com")),
+ ... )
+ ... .subquery()
+ ... .select(), # apply subquery here
+ ... addresses.select().where(addresses.c.email_address.like("%@msn.com")),
... )
{sql}>>> conn.execute(u).fetchall()
SELECT anon_1.id, anon_1.user_id, anon_1.email_address
be rendered without table names::
>>> u = union(
- ... addresses.select().
- ... where(addresses.c.email_address == 'foo@bar.com'),
- ... addresses.select().
- ... where(addresses.c.email_address.like('%@yahoo.com')),
+ ... addresses.select().where(addresses.c.email_address == "foo@bar.com"),
+ ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")),
... )
>>> u = u.order_by(u.selected_columns.email_address)
>>> print(u)
.. sourcecode:: pycon+sql
- >>> subq = select(func.count(addresses.c.id)).\
- ... where(users.c.id == addresses.c.user_id).\
- ... scalar_subquery()
+ >>> subq = (
+ ... select(func.count(addresses.c.id))
+ ... .where(users.c.id == addresses.c.user_id)
+ ... .scalar_subquery()
+ ... )
The above construct is now a :class:`_expression.ScalarSelect` object,
which is an adapter around the original :class:`~.expression.Select`
.. sourcecode:: pycon+sql
- >>> subq = select(func.count(addresses.c.id)).\
- ... where(users.c.id == addresses.c.user_id).\
- ... label("address_count")
+ >>> subq = (
+ ... select(func.count(addresses.c.id))
+ ... .where(users.c.id == addresses.c.user_id)
+ ... .label("address_count")
+ ... )
>>> conn.execute(select(users.c.name, subq)).fetchall()
{opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1
FROM addresses
.. sourcecode:: pycon+sql
- >>> stmt = select(addresses.c.user_id).\
- ... where(addresses.c.user_id == users.c.id).\
- ... where(addresses.c.email_address == 'jack@yahoo.com')
- >>> enclosing_stmt = select(users.c.name).\
- ... where(users.c.id == stmt.scalar_subquery())
+ >>> stmt = (
+ ... select(addresses.c.user_id)
+ ... .where(addresses.c.user_id == users.c.id)
+ ... .where(addresses.c.email_address == "jack@yahoo.com")
+ ... )
+ >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery())
>>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name
FROM users
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.id).\
- ... where(users.c.id == addresses.c.user_id).\
- ... where(users.c.name == 'jack').\
- ... correlate(addresses)
- >>> enclosing_stmt = select(
- ... users.c.name, addresses.c.email_address).\
- ... select_from(users.join(addresses)).\
- ... where(users.c.id == stmt.scalar_subquery())
+ >>> stmt = (
+ ... select(users.c.id)
+ ... .where(users.c.id == addresses.c.user_id)
+ ... .where(users.c.name == "jack")
+ ... .correlate(addresses)
+ ... )
+ >>> enclosing_stmt = (
+ ... select(users.c.name, addresses.c.email_address)
+ ... .select_from(users.join(addresses))
+ ... .where(users.c.id == stmt.scalar_subquery())
+ ... )
>>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.id).\
- ... where(users.c.name == 'wendy').\
- ... correlate(None)
- >>> enclosing_stmt = select(users.c.name).\
- ... where(users.c.id == stmt.scalar_subquery())
+ >>> stmt = select(users.c.id).where(users.c.name == "wendy").correlate(None)
+ >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery())
>>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name
FROM users
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.id).\
- ... where(users.c.id == addresses.c.user_id).\
- ... where(users.c.name == 'jack').\
- ... correlate_except(users)
- >>> enclosing_stmt = select(
- ... users.c.name, addresses.c.email_address).\
- ... select_from(users.join(addresses)).\
- ... where(users.c.id == stmt.scalar_subquery())
+ >>> stmt = (
+ ... select(users.c.id)
+ ... .where(users.c.id == addresses.c.user_id)
+ ... .where(users.c.name == "jack")
+ ... .correlate_except(users)
+ ... )
+ >>> enclosing_stmt = (
+ ... select(users.c.name, addresses.c.email_address)
+ ... .select_from(users.join(addresses))
+ ... .where(users.c.id == stmt.scalar_subquery())
+ ... )
>>> conn.execute(enclosing_stmt).fetchall()
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
like the above using the :meth:`_expression.Select.lateral` method as follows::
>>> from sqlalchemy import table, column, select, true
- >>> people = table('people', column('people_id'), column('age'), column('name'))
- >>> books = table('books', column('book_id'), column('owner_id'))
- >>> subq = select(books.c.book_id).\
- ... where(books.c.owner_id == people.c.people_id).lateral("book_subq")
+ >>> people = table("people", column("people_id"), column("age"), column("name"))
+ >>> books = table("books", column("book_id"), column("owner_id"))
+ >>> subq = (
+ ... select(books.c.book_id)
+ ... .where(books.c.owner_id == people.c.people_id)
+ ... .lateral("book_subq")
+ ... )
>>> print(select(people).select_from(people.join(subq, true())))
SELECT people.people_id, people.age, people.name
FROM people JOIN LATERAL (SELECT books.book_id AS book_id
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.name, func.count(addresses.c.id)).\
- ... select_from(users.join(addresses)).\
- ... group_by(users.c.name)
+ >>> stmt = (
+ ... select(users.c.name, func.count(addresses.c.id))
+ ... .select_from(users.join(addresses))
+ ... .group_by(users.c.name)
+ ... )
>>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name, count(addresses.id) AS count_1
FROM users JOIN addresses
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.name, func.count(addresses.c.id)).\
- ... select_from(users.join(addresses)).\
- ... group_by(users.c.name).\
- ... having(func.length(users.c.name) > 4)
+ >>> stmt = (
+ ... select(users.c.name, func.count(addresses.c.id))
+ ... .select_from(users.join(addresses))
+ ... .group_by(users.c.name)
+ ... .having(func.length(users.c.name) > 4)
+ ... )
>>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name, count(addresses.id) AS count_1
FROM users JOIN addresses
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.name).\
- ... where(addresses.c.email_address.
- ... contains(users.c.name)).\
- ... distinct()
+ >>> stmt = (
+ ... select(users.c.name)
+ ... .where(addresses.c.email_address.contains(users.c.name))
+ ... .distinct()
+ ... )
>>> conn.execute(stmt).fetchall()
{opensql}SELECT DISTINCT users.name
FROM users, addresses
.. sourcecode:: pycon+sql
- >>> stmt = select(users.c.name, addresses.c.email_address).\
- ... select_from(users.join(addresses)).\
- ... limit(1).offset(1)
+ >>> stmt = (
+ ... select(users.c.name, addresses.c.email_address)
+ ... .select_from(users.join(addresses))
+ ... .limit(1)
+ ... .offset(1)
+ ... )
>>> conn.execute(stmt).fetchall()
{opensql}SELECT users.name, addresses.email_address
FROM users JOIN addresses ON users.id = addresses.user_id
.. sourcecode:: pycon+sql
- >>> stmt = users.update().\
- ... values(fullname="Fullname: " + users.c.name)
+ >>> stmt = users.update().values(fullname="Fullname: " + users.c.name)
>>> conn.execute(stmt)
{opensql}UPDATE users SET fullname=(? || users.name)
[...] ('Fullname: ',)
.. sourcecode:: pycon+sql
- >>> stmt = users.insert().\
- ... values(name=bindparam('_name') + " .. name")
- >>> conn.execute(stmt, [
- ... {'id':4, '_name':'name1'},
- ... {'id':5, '_name':'name2'},
- ... {'id':6, '_name':'name3'},
- ... ])
+ >>> stmt = users.insert().values(name=bindparam("_name") + " .. name")
+ >>> conn.execute(
+ ... stmt,
+ ... [
+ ... {"id": 4, "_name": "name1"},
+ ... {"id": 5, "_name": "name2"},
+ ... {"id": 6, "_name": "name3"},
+ ... ],
+ ... )
{opensql}INSERT INTO users (id, name) VALUES (?, (? || ?))
[...] ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name'))
COMMIT
.. sourcecode:: pycon+sql
- >>> stmt = users.update().\
- ... where(users.c.name == 'jack').\
- ... values(name='ed')
+ >>> stmt = users.update().where(users.c.name == "jack").values(name="ed")
>>> conn.execute(stmt)
{opensql}UPDATE users SET name=? WHERE users.name = ?
.. sourcecode:: pycon+sql
- >>> stmt = users.update().\
- ... where(users.c.name == bindparam('oldname')).\
- ... values(name=bindparam('newname'))
- >>> conn.execute(stmt, [
- ... {'oldname':'jack', 'newname':'ed'},
- ... {'oldname':'wendy', 'newname':'mary'},
- ... {'oldname':'jim', 'newname':'jake'},
- ... ])
+ >>> stmt = (
+ ... users.update()
+ ... .where(users.c.name == bindparam("oldname"))
+ ... .values(name=bindparam("newname"))
+ ... )
+ >>> conn.execute(
+ ... stmt,
+ ... [
+ ... {"oldname": "jack", "newname": "ed"},
+ ... {"oldname": "wendy", "newname": "mary"},
+ ... {"oldname": "jim", "newname": "jake"},
+ ... ],
+ ... )
{opensql}UPDATE users SET name=? WHERE users.name = ?
[...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim'))
COMMIT
.. sourcecode:: pycon+sql
- >>> stmt = select(addresses.c.email_address).\
- ... where(addresses.c.user_id == users.c.id).\
- ... limit(1)
+ >>> stmt = (
+ ... select(addresses.c.email_address).where(addresses.c.user_id == users.c.id).limit(1)
+ ... )
>>> conn.execute(users.update().values(fullname=stmt.scalar_subquery()))
{opensql}UPDATE users SET fullname=(SELECT addresses.email_address
FROM addresses
The SQLAlchemy :func:`_expression.update` construct supports both of these modes
implicitly, by specifying multiple tables in the WHERE clause::
- stmt = users.update().\
- values(name='ed wood').\
- where(users.c.id == addresses.c.id).\
- where(addresses.c.email_address.startswith('ed%'))
+ stmt = (
+ users.update()
+ .values(name="ed wood")
+ .where(users.c.id == addresses.c.id)
+ .where(addresses.c.email_address.startswith("ed%"))
+ )
conn.execute(stmt)
The resulting SQL from the above statement would render as::
When using MySQL, columns from each table can be assigned to in the
SET clause directly, using the dictionary form passed to :meth:`_expression.Update.values`::
- stmt = users.update().\
- values({
- users.c.name:'ed wood',
- addresses.c.email_address:'ed.wood@foo.com'
- }).\
- where(users.c.id == addresses.c.id).\
- where(addresses.c.email_address.startswith('ed%'))
+ stmt = (
+ users.update()
+ .values({users.c.name: "ed wood", addresses.c.email_address: "ed.wood@foo.com"})
+ .where(users.c.id == addresses.c.id)
+ .where(addresses.c.email_address.startswith("ed%"))
+ )
The tables are referenced explicitly in the SET clause::
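    UPDATE users, addresses
    SET users.name=%s, addresses.email_address=%s
    WHERE users.id = addresses.id
    AND (addresses.email_address LIKE concat(%s, '%%'))

The rendering above is a sketch; the exact SET ordering and LIKE expression may
vary by SQLAlchemy and server version.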
we supply a **series of 2-tuples**
as the argument to the method::
- stmt = some_table.update().\
- ordered_values((some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10))
+ stmt = some_table.update().ordered_values(
+ (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)
+ )
The series of 2-tuples is essentially the same structure as a Python
dictionary, except that it explicitly suggests a specific ordering. Using the
COMMIT
{stop}<sqlalchemy.engine.cursor.LegacyCursorResult object at 0x...>
- >>> conn.execute(users.delete().where(users.c.name > 'm'))
+ >>> conn.execute(users.delete().where(users.c.name > "m"))
{opensql}DELETE FROM users WHERE users.name > ?
[...] ('m',)
COMMIT
:func:`_expression.delete` construct supports both of these modes
implicitly, by specifying multiple tables in the WHERE clause::
- stmt = users.delete().\
- where(users.c.id == addresses.c.id).\
- where(addresses.c.email_address.startswith('ed%'))
+ stmt = (
+ users.delete()
+ .where(users.c.id == addresses.c.id)
+ .where(addresses.c.email_address.startswith("ed%"))
+ )
conn.execute(stmt)
On a PostgreSQL backend, the resulting SQL from the above statement would render as::
metadata_obj = MetaData()
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('user_name', String, primary_key=True),
- Column('email_address', String(60)),
+ Column("user_name", String, primary_key=True),
+ Column("email_address", String(60)),
)
When using a particular :class:`_types.TypeEngine` class in a
valid with SQL server are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.mssql import \
- BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \
- DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, JSON, MONEY, \
- NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \
- SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \
- TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR
+ from sqlalchemy.dialects.mssql import (
+ BIGINT,
+ BINARY,
+ BIT,
+ CHAR,
+ DATE,
+ DATETIME,
+ DATETIME2,
+ DATETIMEOFFSET,
+ DECIMAL,
+ FLOAT,
+ IMAGE,
+ INTEGER,
+ JSON,
+ MONEY,
+ NCHAR,
+ NTEXT,
+ NUMERIC,
+ NVARCHAR,
+ REAL,
+ SMALLDATETIME,
+ SMALLINT,
+ SMALLMONEY,
+ SQL_VARIANT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ TINYINT,
+ UNIQUEIDENTIFIER,
+ VARBINARY,
+ VARCHAR,
+ )
Types which are specific to SQL Server, or have SQL Server-specific
construction arguments, are as follows:
As with all SQLAlchemy dialects, all UPPERCASE types that are known to be
valid with MySQL are importable from the top level dialect::
- from sqlalchemy.dialects.mysql import \
- BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
- DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
- LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
- NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
- TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR
+ from sqlalchemy.dialects.mysql import (
+ BIGINT,
+ BINARY,
+ BIT,
+ BLOB,
+ BOOLEAN,
+ CHAR,
+ DATE,
+ DATETIME,
+ DECIMAL,
+ DOUBLE,
+ ENUM,
+ FLOAT,
+ INTEGER,
+ LONGBLOB,
+ LONGTEXT,
+ MEDIUMBLOB,
+ MEDIUMINT,
+ MEDIUMTEXT,
+ NCHAR,
+ NUMERIC,
+ NVARCHAR,
+ REAL,
+ SET,
+ SMALLINT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ TINYBLOB,
+ TINYINT,
+ TINYTEXT,
+ VARBINARY,
+ VARCHAR,
+ YEAR,
+ )
Types which are specific to MySQL, or have MySQL-specific
construction arguments, are as follows:
valid with Oracle are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.oracle import \
- BFILE, BLOB, CHAR, CLOB, DATE, \
- DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, NCHAR, \
- NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \
- VARCHAR2
+ from sqlalchemy.dialects.oracle import (
+ BFILE,
+ BLOB,
+ CHAR,
+ CLOB,
+ DATE,
+ DOUBLE_PRECISION,
+ FLOAT,
+ INTERVAL,
+ LONG,
+ NCLOB,
+ NCHAR,
+ NUMBER,
+ NVARCHAR,
+ NVARCHAR2,
+ RAW,
+ TIMESTAMP,
+ VARCHAR,
+ VARCHAR2,
+ )
.. versionadded:: 1.2.19 Added :class:`_types.NCHAR` to the list of datatypes
exported by the Oracle dialect.
valid with PostgreSQL are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.postgresql import \
- ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \
- DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \
- INTERVAL, JSON, JSONB, MACADDR, MONEY, NUMERIC, OID, REAL, SMALLINT, TEXT, \
- TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \
- DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR
+ from sqlalchemy.dialects.postgresql import (
+ ARRAY,
+ BIGINT,
+ BIT,
+ BOOLEAN,
+ BYTEA,
+ CHAR,
+ CIDR,
+ DATE,
+ DOUBLE_PRECISION,
+ ENUM,
+ FLOAT,
+ HSTORE,
+ INET,
+ INTEGER,
+ INTERVAL,
+ JSON,
+ JSONB,
+ MACADDR,
+ MONEY,
+ NUMERIC,
+ OID,
+ REAL,
+ SMALLINT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ UUID,
+ VARCHAR,
+ INT4RANGE,
+ INT8RANGE,
+ NUMRANGE,
+ DATERANGE,
+ TSRANGE,
+ TSTZRANGE,
+ TSVECTOR,
+ )
Types which are specific to PostgreSQL, or have PostgreSQL-specific
construction arguments, are as follows:
from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE
+
class RoomBooking(Base):
- __tablename__ = 'room_booking'
+ __tablename__ = "room_booking"
room = Column(Integer(), primary_key=True)
during = Column(TSRANGE())
- __table_args__ = (
- ExcludeConstraint(('room', '='), ('during', '&&')),
- )
+ __table_args__ = (ExcludeConstraint(("room", "="), ("during", "&&")),)
PostgreSQL DML Constructs
-------------------------
valid with SQLite are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.sqlite import \
- BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \
- INTEGER, NUMERIC, JSON, SMALLINT, TEXT, TIME, TIMESTAMP, \
- VARCHAR
+ from sqlalchemy.dialects.sqlite import (
+ BLOB,
+ BOOLEAN,
+ CHAR,
+ DATE,
+ DATETIME,
+ DECIMAL,
+ FLOAT,
+ INTEGER,
+ NUMERIC,
+ JSON,
+ SMALLINT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ VARCHAR,
+ )
.. module:: sqlalchemy.dialects.sqlite
directly, such as when we use ``print()``::
>>> from sqlalchemy import column
- >>> print(column('x') == 5)
+ >>> print(column("x") == 5)
x = :x_1
When the above SQL expression is stringified, the :class:`.StrSQLCompiler`
>>> from sqlalchemy.dialects.postgresql import insert
>>> from sqlalchemy import table, column
- >>> my_table = table('my_table', column('x'), column('y'))
- >>> insert_stmt = insert(my_table).values(x='foo')
- >>> insert_stmt = insert_stmt.on_conflict_do_nothing(
- ... index_elements=['y']
- ... )
+ >>> my_table = table("my_table", column("x"), column("y"))
+ >>> insert_stmt = insert(my_table).values(x="foo")
+ >>> insert_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["y"])
>>> print(insert_stmt)
Traceback (most recent call last):
declarative such as::
class Bar(Base):
- __tablename__ = 'bar'
+ __tablename__ = "bar"
id = Column(Integer, primary_key=True)
cprop = deferred(Column(Integer))
- __table_args__ = (
- CheckConstraint(cprop > 5),
- )
+ __table_args__ = (CheckConstraint(cprop > 5),)
Above, the ``cprop`` attribute is used inline before it has been mapped,
however this ``cprop`` attribute is not a :class:`_schema.Column`,
:attr:`.ColumnProperty.expression` attribute::
class Bar(Base):
- __tablename__ = 'bar'
+ __tablename__ = "bar"
id = Column(Integer, primary_key=True)
cprop = deferred(Column(Integer))
- __table_args__ = (
- CheckConstraint(cprop.expression > 5),
- )
-
-
+ __table_args__ = (CheckConstraint(cprop.expression > 5),)
.. _error_cd3x:
implicitly or explicitly and does not provide a value when the statement
is executed::
- stmt = select(table.c.column).where(table.c.id == bindparam('my_param'))
+ stmt = select(table.c.column).where(table.c.id == bindparam("my_param"))
result = conn.execute(stmt)
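The error is resolved by supplying a value for the parameter at execution time;
a minimal sketch using an arbitrary value::

    result = conn.execute(stmt, {"my_param": 12})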
Since "b" is required, pass it as ``None`` so that the INSERT may proceed::
e.execute(
- t.insert(), [
+ t.insert(),
+ [
{"a": 1, "b": 2, "c": 3},
{"a": 2, "b": None, "c": 4},
{"a": 3, "b": 4, "c": 5},
- ]
+ ],
)
.. seealso::
Given an example as::
m = MetaData()
- t = Table(
- 't', m,
- Column('a', Integer),
- Column('b', Integer),
- Column('c', Integer)
- )
+ t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer))
stmt = select(t)
Above, ``stmt`` represents a SELECT statement. The error is produced when we want
a1 = Address.__table__
- q = s.query(User).\
- join(a1, User.addresses).\
- filter(Address.email_address == 'ed@foo.com').all()
-
+ q = (
+ s.query(User)
+ .join(a1, User.addresses)
+ .filter(Address.email_address == "ed@foo.com")
+ .all()
+ )
The above pattern also allows an arbitrary selectable, such as
a Core :class:`_sql.Join` or :class:`_sql.Alias` object,
a1 = Address.__table__.alias()
- q = s.query(User).\
- join(a1, User.addresses).\
- filter(a1.c.email_address == 'ed@foo.com').all()
+ q = (
+ s.query(User)
+ .join(a1, User.addresses)
+ .filter(a1.c.email_address == "ed@foo.com")
+ .all()
+ )
The correct way to specify a join target is always by using the mapped
class itself or an :class:`_orm.aliased` object, in the latter case using the
:meth:`_orm.PropComparator.of_type` modifier to set up an alias::
# normal join to relationship entity
- q = s.query(User).\
- join(User.addresses).\
- filter(Address.email_address == 'ed@foo.com')
+ q = s.query(User).join(User.addresses).filter(Address.email_address == "ed@foo.com")
# name Address target explicitly, not necessary but legal
- q = s.query(User).\
- join(Address, User.addresses).\
- filter(Address.email_address == 'ed@foo.com')
+ q = (
+ s.query(User)
+ .join(Address, User.addresses)
+ .filter(Address.email_address == "ed@foo.com")
+ )
Join to an alias::
a1 = aliased(Address)
# of_type() form; recommended
- q = s.query(User).\
- join(User.addresses.of_type(a1)).\
- filter(a1.email_address == 'ed@foo.com')
+ q = (
+ s.query(User)
+ .join(User.addresses.of_type(a1))
+ .filter(a1.email_address == "ed@foo.com")
+ )
# target, onclause form
- q = s.query(User).\
- join(a1, User.addresses).\
- filter(a1.email_address == 'ed@foo.com')
-
+ q = s.query(User).join(a1, User.addresses).filter(a1.email_address == "ed@foo.com")
.. _error_xaj2:
of the join. For example given a joined inheritance mapping as::
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
manager_id = Column(ForeignKey("manager.id"))
name = Column(String(50))
reports_to = relationship("Manager", foreign_keys=manager_id)
__mapper_args__ = {
- 'polymorphic_identity':'employee',
- 'polymorphic_on':type,
+ "polymorphic_identity": "employee",
+ "polymorphic_on": type,
}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
__mapper_args__ = {
- 'polymorphic_identity':'manager',
- 'inherit_condition': id == Employee.id
+ "polymorphic_identity": "manager",
+ "inherit_condition": id == Employee.id,
}
The above mapping includes a relationship between the ``Employee`` and
If we then wanted to use :func:`_orm.contains_eager` to populate the
``reports_to`` attribute, we refer to the alias::
- >>> stmt =select(Employee).join(
- ... Employee.reports_to.of_type(manager_alias)
- ... ).options(
- ... contains_eager(Employee.reports_to.of_type(manager_alias))
+ >>> stmt = (
+ ... select(Employee)
+ ... .join(Employee.reports_to.of_type(manager_alias))
+ ... .options(contains_eager(Employee.reports_to.of_type(manager_alias)))
... )
Without using the explicit :func:`_orm.aliased` object, in some more nested
# configuration step occurs
a = relationship("A", back_populates="bs", cascade="all, delete-orphan")
+
configure_mappers()
Above, the "delete-orphan" setting on ``B.a`` indicates the intent that
"Child",
primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)",
backref="parent",
- overlaps="c2, parent"
+ overlaps="c2, parent",
)
c2 = relationship(
"Child",
primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)",
- overlaps="c1, parent"
+ overlaps="c1, parent",
)
flag = Column(Integer)
-
Above, the ORM will know that the overlap between ``Parent.c1``,
``Parent.c2`` and ``Child.parent`` is intentional.
# result internally pre-fetches all objects
result = sess.execute(
- select(User).where(User.id == 7),
- execution_options={"prebuffer_rows": True}
+ select(User).where(User.id == 7), execution_options={"prebuffer_rows": True}
)
# context manager is closed, so session_obj above is closed, identity
that is not associated with any :class:`_engine.Engine`::
metadata_obj = MetaData()
- table = Table('t', metadata_obj, Column('q', Integer))
+ table = Table("t", metadata_obj, Column("q", Integer))
stmt = select(table)
- result = stmt.execute() # <--- raises
+ result = stmt.execute() # <--- raises
What the logic is expecting is that the :class:`_schema.MetaData` object has
been **bound** to a :class:`_engine.Engine`::
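    engine = create_engine("sqlite://")
    metadata_obj = MetaData(bind=engine)
    table = Table("t", metadata_obj, Column("q", Integer))

    stmt = select(table)
    result = stmt.execute()  # succeeds; the Engine is located via the bound MetaData

The connection URL above is illustrative; any :class:`_engine.Engine` may be
used in this legacy "bound metadata" pattern.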
the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`::
with engine.connect() as conn:
- result = conn.execute(stmt)
+ result = conn.execute(stmt)
When using the ORM, a similar facility is available via the :class:`.Session`::
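    with Session(engine) as session:
        result = session.execute(stmt)

The above is a minimal sketch; the :class:`.Session` locates the appropriate
engine or connection at execution time.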
The :func:`_sa.create_engine` call accepts additional arguments either
directly via the ``connect_args`` keyword argument::
- e = create_engine("mysql://scott:tiger@localhost/test",
- connect_args={"encoding": "utf8"})
+ e = create_engine(
+ "mysql://scott:tiger@localhost/test", connect_args={"encoding": "utf8"}
+ )
Or for basic string and integer arguments, they can usually be specified
in the query string of the URL::
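    e = create_engine("mysql://scott:tiger@localhost/test?encoding=utf8")

This form is a sketch mirroring the ``connect_args`` example above; whether a
given DBAPI accepts ``encoding`` as a query string argument depends on the
driver in use.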
fn(cursor_obj, statement, context=context, *arg)
except engine.dialect.dbapi.Error as raw_dbapi_err:
connection = context.root_connection
- if engine.dialect.is_disconnect(
- raw_dbapi_err, connection, cursor_obj
- ):
+ if engine.dialect.is_disconnect(raw_dbapi_err, connection, cursor_obj):
if retry > num_retries:
raise
engine.logger.error(
time.sleep(5)
e = reconnecting_engine(
- create_engine(
- "mysql://scott:tiger@localhost/test", echo_pool=True
- ),
+ create_engine("mysql://scott:tiger@localhost/test", echo_pool=True),
num_retries=5,
retry_interval=2,
)
from sqlalchemy import create_engine
from sqlalchemy.pool import QueuePool
- engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False))
+ engine = create_engine(
+ "mysql://scott:tiger@localhost/myisam_database",
+ pool=QueuePool(reset_on_return=False),
+ )
I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause
a COMMIT as any connection is returned to the pool::
- engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit'))
-
+ engine = create_engine(
+ "mssql://scott:tiger@mydsn", pool=QueuePool(reset_on_return="commit")
+ )
I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working!
----------------------------------------------------------------------------------------------------------------------------------------------------------
from sqlalchemy import create_mock_engine
+
def dump(sql, *multiparams, **params):
print(sql.compile(dialect=engine.dialect))
- engine = create_mock_engine('postgresql://', dump)
+
+
+ engine = create_mock_engine("postgresql://", dump)
metadata_obj.create_all(engine, checkfirst=False)
The `Alembic <https://alembic.sqlalchemy.org>`_ tool also supports
class SomeClass(Base):
__table__ = some_table_with_no_pk
__mapper_args__ = {
- 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar]
+ "primary_key": [some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar]
}
Better yet is when using fully declared table metadata, use the ``primary_key=True``
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(A):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
As of SQLAlchemy version 0.9.5, the above condition is detected, and will
warn that the ``id`` column of ``A`` and ``B`` is being combined under
A mapping which resolves this is as follows::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(A):
- __tablename__ = 'b'
+ __tablename__ = "b"
- b_id = Column('id', Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ b_id = Column("id", Integer, primary_key=True)
+ a_id = Column(Integer, ForeignKey("a.id"))
Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite
the fact that it is ``B.a_id`` which relates to ``A.id``. We could combine
them together using :func:`.column_property`::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(A):
- __tablename__ = 'b'
+ __tablename__ = "b"
# probably not what you want, but this is a demonstration
id = column_property(Column(Integer, primary_key=True), A.id)
- a_id = Column(Integer, ForeignKey('a.id'))
-
-
+ a_id = Column(Integer, ForeignKey("a.id"))
I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys.
------------------------------------------------------------------------------------------------------------------------------------------------------------------
class MyClass(Base):
# ....
- foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar"))
+ foo = relationship(
+ "Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")
+ )
That's an ``and_()`` of two string expressions, to which SQLAlchemy cannot apply any mapping. Declarative allows :func:`_orm.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this conversion doesn't occur inside an ``and_()`` expression - it's a special operation Declarative applies only to the *entirety* of what's passed to ``primaryjoin`` or other arguments as a string::
class MyClass(Base):
# ....
- foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)")
+ foo = relationship(
+ "Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)"
+ )
Or if the objects you need are already available, skip the strings::
class MyClass(Base):
# ....
- foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar))
+ foo = relationship(
+ Dest, primaryjoin=and_(MyClass.id == Dest.foo_id, MyClass.foo == Dest.bar)
+ )
The same idea applies to all the other arguments, such as ``foreign_keys``::
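    # will not work - the strings inside the list are not evaluated
    foo = relationship(Dest, foreign_keys=["Dest.foo_id", "Dest.bar_id"])

    # works - the entire expression is a single evaluable string
    foo = relationship(Dest, foreign_keys="[Dest.foo_id, Dest.bar_id]")

The ``Dest.foo_id`` / ``Dest.bar_id`` columns above are hypothetical, for
illustration only.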
logger = logging.getLogger("myapp.sqltime")
logger.setLevel(logging.DEBUG)
+
@event.listens_for(Engine, "before_cursor_execute")
- def before_cursor_execute(conn, cursor, statement,
- parameters, context, executemany):
- conn.info.setdefault('query_start_time', []).append(time.time())
+ def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
+ conn.info.setdefault("query_start_time", []).append(time.time())
logger.debug("Start Query: %s", statement)
+
@event.listens_for(Engine, "after_cursor_execute")
- def after_cursor_execute(conn, cursor, statement,
- parameters, context, executemany):
- total = time.time() - conn.info['query_start_time'].pop(-1)
+ def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
+ total = time.time() - conn.info["query_start_time"].pop(-1)
logger.debug("Query Complete!")
logger.debug("Total Time: %f", total)
import pstats
import contextlib
+
@contextlib.contextmanager
def profiled():
pr = cProfile.Profile()
pr.enable()
yield
pr.disable()
s = io.StringIO()
- ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
+ ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
ps.print_stats()
# uncomment this to see who's calling what
# ps.print_callers()
To profile a section of code::
with profiled():
- Session.query(FooClass).filter(FooClass.somevalue==8).all()
+ Session.query(FooClass).filter(FooClass.somevalue == 8).all()
The output of profiling can be used to give an idea where time is
being spent. A section of profiling output looks like this::
from sqlalchemy import TypeDecorator
import time
+
class Foo(TypeDecorator):
impl = String
def process_result_value(self, value, thing):
# intentionally add slowness for illustration purposes
- time.sleep(.001)
+ time.sleep(0.001)
return value
the profiling output of this intentionally slow operation can be seen like this::
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
- Base = declarative_base(create_engine('sqlite://'))
+ Base = declarative_base(create_engine("sqlite://"))
+
class Foo(Base):
- __tablename__ = 'foo'
+ __tablename__ = "foo"
id = Column(Integer, primary_key=True)
+
Base.metadata.create_all()
session = sessionmaker()()
# continue using session without rolling back
session.commit()
-
The usage of the :class:`.Session` should fit within a structure similar to this::
try:
Given a block such as::
- sess = Session() # begins a logical transaction
+ sess = Session() # begins a logical transaction
try:
sess.flush()
for example use the ``User`` mapping described at :ref:`ormtutorial_toplevel`,
and we had a SQL query like the following::
- q = session.query(User).outerjoin(User.addresses).filter(User.name == 'jack')
+ q = session.query(User).outerjoin(User.addresses).filter(User.name == "jack")
Above, the sample data used in the tutorial has two rows in the ``addresses``
table for the ``users`` row with the name ``'jack'``, primary key value 5.
are **deduplicated**. This does not occur if we instead request individual
columns back::
- >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(User.name == 'jack').all()
+ >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(
+ ... User.name == "jack"
+ ... ).all()
[(5, 'jack'), (5, 'jack')]
There are two main reasons the :class:`_query.Query` will deduplicate:
print("ITER!")
return iter([1, 2, 3, 4, 5])
+
list(Iterates())
output::
o = Session.query(SomeClass).first()
o.foo_id = 7
- Session.expire(o, ['foo']) # object must be persistent for this
+ Session.expire(o, ["foo"]) # object must be persistent for this
foo_7 = Session.query(Foo).get(7)
Session.flush() # emits INSERT
# expire this because we already set .foo to None
- Session.expire(o, ['foo'])
+ Session.expire(o, ["foo"])
assert new_obj.foo is foo_7 # now it loads
-
.. topic:: Attribute loading for non-persistent objects
One variant on the "pending" behavior above is if we use the flag
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", backref="a")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- c_id = Column(ForeignKey('c.id'))
+ a_id = Column(ForeignKey("a.id"))
+ c_id = Column(ForeignKey("c.id"))
c = relationship("C", backref="bs")
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
if we don't use it explicitly)::
>>> from sqlalchemy import table, column, select
- >>> t = table('my_table', column('x'))
+ >>> t = table("my_table", column("x"))
>>> statement = select(t)
>>> print(str(statement))
SELECT my_table.x
as::
>>> from sqlalchemy import column
- >>> print(column('x') == 'some value')
+ >>> print(column("x") == "some value")
x = :x_1
Stringifying for Specific Databases
use a PostgreSQL dialect::
from sqlalchemy.dialects import postgresql
+
print(statement.compile(dialect=postgresql.dialect()))
Note that any dialect can be assembled using :func:`_sa.create_engine` itself
from sqlalchemy.sql import table, column, select
- t = table('t', column('x'))
+ t = table("t", column("x"))
s = select(t).where(t.c.x == 5)
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(UUID)
+
stmt = select(A).where(A.data == uuid.uuid4())
Given the above model and statement which will compare a column to a single
their positional order for the statement as compiled::
import re
+
e = create_engine("sqlite+pysqlite://")
# will use qmark style, i.e. ? for param
# params in positional order
params = (repr(compiled.params[name]) for name in compiled.positiontup)
- print(re.sub(r'\?', lambda m: next(params), str(compiled)))
+ print(re.sub(r"\?", lambda m: next(params), str(compiled)))
The above snippet prints::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import BindParameter
+
@compiles(BindParameter)
def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw):
if not use_my_literal_recipe:
# render the value directly
return repr(element.value)
+
e = create_engine("postgresql+psycopg2://")
print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True}))
from sqlalchemy import TypeDecorator
+
class UUIDStringify(TypeDecorator):
impl = UUID
or locally within the statement using :func:`_sql.type_coerce`, such as ::
from sqlalchemy import type_coerce
+
stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4())
print(stmt.compile(e, compile_kwargs={"literal_binds": True}))
>>> e = create_engine("sqlite+pysqlite://")
>>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True})
>>> params = (repr(compiled.params[name]) for name in compiled.positiontup)
- >>> print(re.sub(r'\?', lambda m: next(params), str(compiled)))
+ >>> print(re.sub(r"\?", lambda m: next(params), str(compiled)))
SELECT a.id, a.data
FROM a
WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa'))
The :meth:`.Operators.op` method allows one to create a custom database operator
otherwise not known by SQLAlchemy::
- >>> print(column('q').op('->')(column('p')))
+ >>> print(column("q").op("->")(column("p")))
q -> p
However, when using it on the right side of a compound expression, it doesn't
generate parenthesis as we expect::
- >>> print((column('q1') + column('q2')).op('->')(column('p')))
+ >>> print((column("q1") + column("q2")).op("->")(column("p")))
q1 + q2 -> p
Where above, we probably want ``(q1 + q2) -> p``.
number, where 100 is the maximum value, and the highest number used by any
SQLAlchemy operator is currently 15::
- >>> print((column('q1') + column('q2')).op('->', precedence=100)(column('p')))
+ >>> print((column("q1") + column("q2")).op("->", precedence=100)(column("p")))
(q1 + q2) -> p
We can also usually force parenthesization around a binary expression (e.g.
an expression that has left/right operands and an operator) using the
:meth:`_expression.ColumnElement.self_group` method::
- >>> print((column('q1') + column('q2')).self_group().op('->')(column('p')))
+ >>> print((column("q1") + column("q2")).self_group().op("->")(column("p")))
(q1 + q2) -> p
Why are the parentheses rules like this?
operator is known to be associative, so that parenthesis are generated
minimally. Otherwise, an expression like::
- column('a') & column('b') & column('c') & column('d')
+ column("a") & column("b") & column("c") & column("d")
would produce::
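    ((a AND b) AND c) AND d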
other cases, it leads to things that are more likely to confuse databases or at
the very least readability, such as::
- column('q', ARRAY(Integer, dimensions=2))[5][6]
+ column("q", ARRAY(Integer, dimensions=2))[5][6]
would produce::
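    (q[5])[6]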
e.g. the highest? Then this expression makes more parenthesis, but is
otherwise OK, that is, these two are equivalent::
- >>> print((column('q') - column('y')).op('+', precedence=100)(column('z')))
+ >>> print((column("q") - column("y")).op("+", precedence=100)(column("z")))
(q - y) + z
- >>> print((column('q') - column('y')).op('+')(column('z')))
+ >>> print((column("q") - column("y")).op("+")(column("z")))
q - y + z
but these two are not::
- >>> print(column('q') - column('y').op('+', precedence=100)(column('z')))
+ >>> print(column("q") - column("y").op("+", precedence=100)(column("z")))
q - y + z
- >>> print(column('q') - column('y').op('+')(column('z')))
+ >>> print(column("q") - column("y").op("+")(column("z")))
q - (y + z)
For now, it's not clear that as long as we are doing parenthesization based on
import numpy
+
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(Integer)
+
# .. later
session.add(A(data=numpy.int64(10)))
session.commit()
-
In the latter case, the issue is due to the ``numpy.int64`` datatype overriding
the ``__eq__()`` method and enforcing that the return type of an expression is
``numpy.True`` or ``numpy.False``, which breaks SQLAlchemy's expression
>>> import numpy
>>> from sqlalchemy import column, Integer
- >>> print(column('x', Integer) == numpy.int64(10)) # works
+ >>> print(column("x", Integer) == numpy.int64(10)) # works
x = :x_1
- >>> print(numpy.int64(10) == column('x', Integer)) # breaks
+ >>> print(numpy.int64(10) == column("x", Integer)) # breaks
False
These errors are both solved in the same way, which is that special numpy
session.add(A(data=int(data)))
- result = session.execute(
- select(A.data).where(int(data) == A.data)
- )
+ result = session.execute(select(A.data).where(int(data) == A.data))
session.commit()
SQL expression for WHERE/HAVING role expected, got True
-------------------------------------------------------
-See :ref:`numpy_int64`.
\ No newline at end of file
+See :ref:`numpy_int64`.
# Session returns a Result that has ORM entities
list_of_users = result.scalars().all()
-
reflection
reflected
In SQLAlchemy, this term refers to the feature of querying a database's
dictionary is associated with a copy of the object, which contains key/value
pairs significant to various internal systems, mostly within the ORM::
- some_column = Column('some_column', Integer)
+ some_column = Column("some_column", Integer)
some_column_annotated = some_column._annotate({"entity": User})
The annotation system differs from the public dictionary :attr:`_schema.Column.info`
on mapped classes. When a class is mapped as such::
class MyClass(Base):
- __tablename__ = 'foo'
+ __tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
single department. A SQLAlchemy mapping might look like::
class Department(Base):
- __tablename__ = 'department'
+ __tablename__ = "department"
id = Column(Integer, primary_key=True)
name = Column(String(30))
employees = relationship("Employee")
+
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(30))
- dep_id = Column(Integer, ForeignKey('department.id'))
+ dep_id = Column(Integer, ForeignKey("department.id"))
.. seealso::
single department. A SQLAlchemy mapping might look like::
class Department(Base):
- __tablename__ = 'department'
+ __tablename__ = "department"
id = Column(Integer, primary_key=True)
name = Column(String(30))
+
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(30))
- dep_id = Column(Integer, ForeignKey('department.id'))
+ dep_id = Column(Integer, ForeignKey("department.id"))
department = relationship("Department")
.. seealso::
used in :term:`one to many` as follows::
class Department(Base):
- __tablename__ = 'department'
+ __tablename__ = "department"
id = Column(Integer, primary_key=True)
name = Column(String(30))
employees = relationship("Employee", backref="department")
+
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(30))
- dep_id = Column(Integer, ForeignKey('department.id'))
+ dep_id = Column(Integer, ForeignKey("department.id"))
A backref can be applied to any relationship, including one to many,
many to one, and :term:`many to many`.
specified using plain table metadata::
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(30))
projects = relationship(
"Project",
- secondary=Table('employee_project', Base.metadata,
- Column("employee_id", Integer, ForeignKey('employee.id'),
- primary_key=True),
- Column("project_id", Integer, ForeignKey('project.id'),
- primary_key=True)
- ),
- backref="employees"
- )
+ secondary=Table(
+ "employee_project",
+ Base.metadata,
+ Column("employee_id", Integer, ForeignKey("employee.id"), primary_key=True),
+ Column("project_id", Integer, ForeignKey("project.id"), primary_key=True),
+ ),
+ backref="employees",
+ )
+
class Project(Base):
- __tablename__ = 'project'
+ __tablename__ = "project"
id = Column(Integer, primary_key=True)
name = Column(String(30))
A SQLAlchemy declarative mapping for the above might look like::
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(30))
class Project(Base):
- __tablename__ = 'project'
+ __tablename__ = "project"
id = Column(Integer, primary_key=True)
name = Column(String(30))
class EmployeeProject(Base):
- __tablename__ = 'employee_project'
+ __tablename__ = "employee_project"
- employee_id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
- project_id = Column(Integer, ForeignKey('project.id'), primary_key=True)
+ employee_id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
+ project_id = Column(Integer, ForeignKey("project.id"), primary_key=True)
role_name = Column(String(30))
project = relationship("Project", backref="project_employees")
employee = relationship("Employee", backref="employee_projects")
-
Employees can be added to a project given a role name::
proj = Project(name="Client A")
emp1 = Employee(name="emp1")
emp2 = Employee(name="emp2")
- proj.project_employees.extend([
- EmployeeProject(employee=emp1, role_name="tech lead"),
- EmployeeProject(employee=emp2, role_name="account executive")
- ])
+ proj.project_employees.extend(
+ [
+ EmployeeProject(employee=emp1, role_name="tech lead"),
+ EmployeeProject(employee=emp2, role_name="account executive"),
+ ]
+ )
.. seealso::
.. sourcecode:: python+sql
>>> import sqlalchemy
- >>> sqlalchemy.__version__ # doctest: +SKIP
+ >>> sqlalchemy.__version__ # doctest: +SKIP
1.4.0
Next Steps
"Parent", secondary=association_table, back_populates="children"
)
-
When using the :paramref:`_orm.relationship.backref` parameter instead of
:paramref:`_orm.relationship.back_populates`, the backref will automatically
use the same :paramref:`_orm.relationship.secondary` argument for the
class Parent(Base):
__tablename__ = "left"
id = Column(Integer, primary_key=True)
- children = relationship(
- "Child", secondary=association_table, backref="parents"
- )
+ children = relationship("Child", secondary=association_table, backref="parents")
class Child(Base):
>>> user1 = sess1.query(User).filter_by(id=1).first()
>>> address1 = user1.addresses[0]
- >>> sess1.close() # user1, address1 no longer associated with sess1
+ >>> sess1.close() # user1, address1 no longer associated with sess1
>>> user1.addresses.remove(address1) # address1 no longer associated with user1
>>> sess2 = Session()
- >>> sess2.add(user1) # ... but it still gets added to the new session,
+ >>> sess2.add(user1) # ... but it still gets added to the new session,
>>> address1 in sess2 # because it's still "pending" for flush
True
probably a confusing statement more easily described through demonstration; it
means that, given a mapping such as this::
- mapper_registry.map_imperatively(Order, order_table, properties={
- 'items' : relationship(Item, backref='order')
- })
+ mapper_registry.map_imperatively(
+ Order, order_table, properties={"items": relationship(Item, backref="order")}
+ )
If an ``Order`` is already in the session, and is assigned to the ``order``
attribute of an ``Item``, the backref appends the ``Item`` to the ``items``
This behavior can be disabled using the :paramref:`_orm.relationship.cascade_backrefs` flag::
- mapper_registry.map_imperatively(Order, order_table, properties={
- 'items' : relationship(Item, backref='order', cascade_backrefs=False)
- })
+ mapper_registry.map_imperatively(
+ Order,
+ order_table,
+ properties={"items": relationship(Item, backref="order", cascade_backrefs=False)},
+ )
So above, the assignment of ``i1.order = o1`` will append ``i1`` to the ``items``
collection of ``o1``, but will not add ``i1`` to the session. You can, of
:func:`_orm.backref` function instead of a string. For example, the above relationship
could be declared::
- mapper_registry.map_imperatively(Order, order_table, properties={
- 'items' : relationship(
- Item, backref=backref('order', cascade_backrefs=False), cascade_backrefs=False
- )
- })
+ mapper_registry.map_imperatively(
+ Order,
+ order_table,
+ properties={
+ "items": relationship(
+ Item,
+ backref=backref("order", cascade_backrefs=False),
+ cascade_backrefs=False,
+ )
+ },
+ )
This sets the ``cascade_backrefs=False`` behavior on both relationships.
addresses = relationship("Address", cascade="all, delete-orphan")
+
# ...
del user.addresses[1]
class ListLike(object):
def __init__(self):
self.data = []
+
def append(self, item):
self.data.append(item)
+
def remove(self, item):
self.data.remove(item)
+
def extend(self, items):
self.data.extend(items)
+
def __iter__(self):
return iter(self.data)
+
def foo(self):
- return 'foo'
+ return "foo"
``append``, ``remove``, and ``extend`` are known list-like methods, and will
be instrumented automatically. ``__iter__`` is not a mutator method and won't
def __init__(self):
self.data = set()
+
def append(self, item):
self.data.add(item)
+
def remove(self, item):
self.data.remove(item)
+
def __iter__(self):
return iter(self.data)
from sqlalchemy.orm.collections import collection
+
class SetLike(object):
__emulates__ = set
from sqlalchemy.util import OrderedDict
from sqlalchemy.orm.collections import MappedCollection
+
class NodeMap(OrderedDict, MappedCollection):
"""Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained."""
class MyAwesomeList(some.great.library.AwesomeList):
pass
+
# ... relationship(..., collection_class=MyAwesomeList)
The ORM uses this approach for built-ins, quietly substituting a trivial
return f"Point(x={self.x!r}, y={self.y!r})"
def __eq__(self, other):
- return (
- isinstance(other, Point)
- and other.x == self.x
- and other.y == self.y
- )
+ return isinstance(other, Point) and other.x == self.x and other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
return f"Point(x={self.x!r}, y={self.y!r})"
def __eq__(self, other):
- return (
- isinstance(other, Point)
- and other.x == self.x
- and other.y == self.y
- )
+ return isinstance(other, Point) and other.x == self.x and other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
return Vertex(Point(x1, y1), Point(x2, y2))
def __composite_values__(self):
- return (
- self.start.__composite_values__()
- + self.end.__composite_values__()
- )
+ return self.start.__composite_values__() + self.end.__composite_values__()
class HasVertex(Base):
s.add(hv)
s.commit()
- hv = s.query(HasVertex).filter(
- HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))).first()
+ hv = (
+ s.query(HasVertex)
+ .filter(HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4)))
+ .first()
+ )
print(hv.vertex.start)
print(hv.vertex.end)
Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request)
+
@on_request_end
def remove_session(req):
Session.remove()
__tablename__ = "user"
__sa_dataclass_metadata_key__ = "sa"
- id: int = field(
- init=False, metadata={"sa": Column(Integer, primary_key=True)}
- )
+ id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)})
name: str = field(default=None, metadata={"sa": Column(String(50))})
fullname: str = field(default=None, metadata={"sa": Column(String(50))})
nickname: str = field(default=None, metadata={"sa": Column(String(12))})
class Address:
__tablename__ = "address"
__sa_dataclass_metadata_key__ = "sa"
- id: int = field(
- init=False, metadata={"sa": Column(Integer, primary_key=True)}
- )
- user_id: int = field(
- init=False, metadata={"sa": Column(ForeignKey("user.id"))}
- )
- email_address: str = field(
- default=None, metadata={"sa": Column(String(50))}
- )
+ id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)})
+ user_id: int = field(init=False, metadata={"sa": Column(ForeignKey("user.id"))})
+ email_address: str = field(default=None, metadata={"sa": Column(String(50))})
.. _orm_imperative_dataclasses:
mapper_registry = registry()
+
@dataclass
class User:
id: int = field(init=False)
nickname: str = None
addresses: List[Address] = field(default_factory=list)
+
@dataclass
class Address:
id: int = field(init=False)
user_id: int = field(init=False)
email_address: str = None
+
metadata_obj = MetaData()
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(12)),
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(12)),
)
address = Table(
- 'address',
+ "address",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String(50)),
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String(50)),
)
- mapper_registry.map_imperatively(User, user, properties={
- 'addresses': relationship(Address, backref='user', order_by=address.c.id),
- })
+ mapper_registry.map_imperatively(
+ User,
+ user,
+ properties={
+ "addresses": relationship(Address, backref="user", order_by=address.c.id),
+ },
+ )
mapper_registry.map_imperatively(Address, address)
__sa_dataclass_metadata_key__ = "sa"
- id: int = field(
- init=False, metadata={"sa": Column(Integer, primary_key=True)}
- )
+ id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)})
addresses: List[Address] = field(
default_factory=list, metadata={"sa": lambda: relationship("Address")}
class AddressMixin:
__tablename__ = "address"
__sa_dataclass_metadata_key__ = "sa"
- id: int = field(
- init=False, metadata={"sa": Column(Integer, primary_key=True)}
- )
+ id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)})
user_id: int = field(
init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))}
)
- email_address: str = field(
- default=None, metadata={"sa": Column(String(50))}
- )
+ email_address: str = field(default=None, metadata={"sa": Column(String(50))})
@mapper_registry.mapped
}
}
+
@mapper_registry.mapped
@define(slots=False)
class Address:
user_id: int
email_address: Optional[str]
-
.. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on
a mapped class, cannot be used with SQLAlchemy mappings without fully
implementing alternative
mapper_registry = registry()
+
@define(slots=False)
class User:
id: int
nickname: str
addresses: List[Address]
+
@define(slots=False)
class Address:
id: int
user_id: int
email_address: Optional[str]
+
metadata_obj = MetaData()
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(12)),
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(12)),
)
address = Table(
- 'address',
+ "address",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String(50)),
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String(50)),
)
- mapper_registry.map_imperatively(User, user, properties={
- 'addresses': relationship(Address, backref='user', order_by=address.c.id),
- })
+ mapper_registry.map_imperatively(
+ User,
+ user,
+ properties={
+ "addresses": relationship(Address, backref="user", order_by=address.c.id),
+ },
+ )
mapper_registry.map_imperatively(Address, address)
Column("lastname", String(50)),
)
- fullname = column_property(
- __table__.c.firstname + " " + __table__.c.lastname
- )
+ fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname)
addresses = relationship("Address", back_populates="user")
key for the class, independently of schema-level primary key constraints::
class GroupUsers(Base):
- __tablename__ = 'group_users'
+ __tablename__ = "group_users"
user_id = Column(String(40))
group_id = Column(String(40))
- __mapper_args__ = {
- "primary_key": [user_id, group_id]
- }
+ __mapper_args__ = {"primary_key": [user_id, group_id]}
.. seealso::
polymorphic_identity="employee",
)
-
.. seealso::
:ref:`single_inheritance` - background on the ORM single table inheritance
def __mapper_args__(cls):
return {
"exclude_properties": [
- column.key for column in cls.__table__.c if
- column.info.get("exclude", False)
+ column.key
+ for column in cls.__table__.c
+ if column.info.get("exclude", False)
]
}
+
Base = declarative_base()
+
class SomeClass(ExcludeColsWFlag, Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id = Column(Integer, primary_key=True)
data = Column(String)
not_needed = Column(String, info={"exclude": True})
-
Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__``
hook that will scan for :class:`.Column` objects that include the key/value
``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then
class MyClass(Base):
@classmethod
def __declare_last__(cls):
- ""
+ """"""
# do something with mappings
``__declare_first__()``
class MyClass(Base):
@classmethod
def __declare_first__(cls):
- ""
+ """"""
# do something before mappings are configured
.. versionadded:: 0.9.3
class ReferenceAddressMixin:
@declared_attr
def address_id(cls):
- return Column(Integer, ForeignKey('address.id'))
+ return Column(Integer, ForeignKey("address.id"))
+
class User(ReferenceAddressMixin, Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
Where above, the ``address_id`` class-level callable is executed at the
def type_(cls):
return Column(String(50))
- __mapper_args__= {'polymorphic_on':type_}
+ __mapper_args__ = {"polymorphic_on": type_}
+
class MyModel(MyMixin, Base):
- __tablename__='test'
- id = Column(Integer, primary_key=True)
+ __tablename__ = "test"
+ id = Column(Integer, primary_key=True)
.. _orm_declarative_mixins_relationships:
class RefTargetMixin:
@declared_attr
def target_id(cls):
- return Column('target_id', ForeignKey('target.id'))
+ return Column("target_id", ForeignKey("target.id"))
@declared_attr
def target(cls):
__tablename__ = "target"
id = Column(Integer, primary_key=True)
-
Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class RefTargetMixin:
@declared_attr
def target_id(cls):
- return Column('target_id', ForeignKey('target.id'))
+ return Column("target_id", ForeignKey("target.id"))
@declared_attr
def target(cls):
- return relationship(Target,
- primaryjoin=lambda: Target.id==cls.target_id
- )
+ return relationship(Target, primaryjoin=lambda: Target.id == cls.target_id)
or alternatively, the string form (which ultimately generates a lambda)::
@declared_attr
def target(cls):
- return relationship(
- Target, primaryjoin=f"Target.id=={cls.__name__}.target_id"
- )
+ return relationship(Target, primaryjoin=f"Target.id=={cls.__name__}.target_id")
.. seealso::
@declared_attr
def __table_args__(cls):
- return (
- Index(f"test_idx_{cls.__tablename__}", "a", "b"),
- )
+ return (Index(f"test_idx_{cls.__tablename__}", "a", "b"),)
class MyModel(MyMixin, Base):
# declarative base class
Base = declarative_base()
-
The declarative base class may also be created from an existing
:class:`_orm.registry`, by using the :meth:`_orm.registry.generate_base`
method::
mapper_registry = registry()
+
class Base(metaclass=DeclarativeMeta):
__abstract__ = True
from sqlalchemy import Table
from sqlalchemy.orm import declarative_base
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/my_existing_database"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database")
Base = declarative_base()
+
class MyClass(Base):
__table__ = Table(
"mytable",
from sqlalchemy import Table
from sqlalchemy.orm import declarative_base
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/my_existing_database"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database")
Base = declarative_base()
Base.metadata.reflect(engine)
+
class MyClass(Base):
- __table__ = Base.metadata.tables['mytable']
+ __table__ = Base.metadata.tables["mytable"]
.. seealso::
complete until we do so, given an :class:`_engine.Engine`::
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/my_existing_database"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database")
Reflected.prepare(engine)
The purpose of the ``Reflected`` class is to define the scope at which
Base = declarative_base()
+
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
>>> user = User("log")
>>> for kw in (Keyword("new_from_blammo"), Keyword("its_big")):
... user.keywords.append(kw)
- ...
>>> print(user.keywords)
[Keyword('new_from_blammo'), Keyword('its_big')]
usage of the assignment operator, also appropriately handled by the
association proxy, to apply a dictionary value to the collection at once::
- >>> user = User('log')
- >>> user.keywords = {
- ... 'sk1':'kw1',
- ... 'sk2':'kw2'
- ... }
+ >>> user = User("log")
+ >>> user.keywords = {"sk1": "kw1", "sk2": "kw2"}
>>> print(user.keywords)
{'sk1': 'kw1', 'sk2': 'kw2'}
- >>> user.keywords['sk3'] = 'kw3'
- >>> del user.keywords['sk2']
+ >>> user.keywords["sk3"] = "kw3"
+ >>> del user.keywords["sk2"]
>>> print(user.keywords)
{'sk1': 'kw1', 'sk3': 'kw3'}
>>> # illustrate un-proxied usage
- ... print(user.user_keyword_associations['sk3'].kw)
+ ... print(user.user_keyword_associations["sk3"].kw)
<__main__.Keyword object at 0x12ceb90>
One caveat with our example above is that because ``Keyword`` objects are created
)
# column-targeted association proxy
- special_keys = association_proxy(
- "user_keyword_associations", "special_key"
- )
+ special_keys = association_proxy("user_keyword_associations", "special_key")
class UserKeywordAssociation(Base):
id = Column(Integer, primary_key=True)
keyword = Column("keyword", String(64))
-
The SQL generated takes the form of a correlated subquery against
the EXISTS SQL operator so that it can be used in a WHERE clause without
the need for additional modifications to the enclosing query. If the
# expire_on_commit=False will prevent attributes from being expired
# after commit.
- async_session = sessionmaker(
- engine, expire_on_commit=False, class_=AsyncSession
- )
+ async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)
async with async_session() as session:
async with session.begin():
asyncio.run(go())
-
The above example prints something along the lines of::
New DBAPI connection: <AdaptedConnection <asyncpg.connection.Connection ...>>
:meth:`_asyncio.async_scoped_session.remove` method::
async def some_function(some_async_session, some_object):
- # use the AsyncSession directly
- some_async_session.add(some_object)
+ # use the AsyncSession directly
+ some_async_session.add(some_object)
- # use the AsyncSession via the context-local proxy
- await AsyncScopedSession.commit()
+ # use the AsyncSession via the context-local proxy
+ await AsyncScopedSession.commit()
- # "remove" the current proxied AsyncSession for the local context
- await AsyncScopedSession.remove()
+ # "remove" the current proxied AsyncSession for the local context
+ await AsyncScopedSession.remove()
.. versionadded:: 1.4.19
my_simple_cache = {}
+
def lookup(session, id_argument):
if "my_key" not in my_simple_cache:
query = session.query(Model).filter(Model.id == bindparam("id"))
parameterized_query = bakery.bake(create_model_query)
if include_frobnizzle:
+
def include_frobnizzle_in_query(query):
return query.filter(Model.frobnizzle == True)
bakery = baked.bakery()
baked_query = bakery(lambda session: session.query(User))
- baked_query += lambda q: q.filter(
- User.name.in_(bindparam("username", expanding=True))
- )
+ baked_query += lambda q: q.filter(User.name.in_(bindparam("username", expanding=True)))
result = baked_query.with_session(session).params(username=["ed", "fred"]).all()
Mixin and Custom Base Classes
=============================
-See :ref:`orm_mixins_toplevel` for this section.
\ No newline at end of file
+See :ref:`orm_mixins_toplevel` for this section.
# a select() construct makes use of SQL expressions derived from the
# User class itself
- select_stmt = (
- select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
- )
+ select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
Above, the steps that the Mypy extension can take include:
)
name: Mapped[Optional[str]] = Mapped._special_method(Column(String))
- def __init__(
- self, id: Optional[int] = ..., name: Optional[str] = ...
- ) -> None:
+ def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None:
...
print(f"Username: {some_user.name}")
- select_stmt = (
- select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
- )
-
+ select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
The key steps which have been taken above include:
id = Column(Integer, primary_key=True)
name = Column(String)
- addresses: Mapped[List["Address"]] = relationship(
- "Address", back_populates="user"
- )
+ addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user")
class Address(Base):
Base = declarative_base()
+
class Employee(ConcreteBase, Base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
Base = declarative_base()
+
class Employee(Base):
__abstract__ = True
"concrete": True,
}
+
Base.registry.configure()
Above, the :meth:`_orm.registry.configure` method is invoked, which will
"concrete": True,
}
-
Above, we use :func:`.polymorphic_union` in the same manner as before, except
that we omit the ``employee`` table.
entity = with_polymorphic(Employee, [Engineer, Manager])
# include columns for all mapped subclasses
- entity = with_polymorphic(Employee, '*')
+ entity = with_polymorphic(Employee, "*")
.. tip::
.. sourcecode:: python+sql
- engineer_employee = with_polymorphic(
- Employee, [Engineer], aliased=True)
- manager_employee = with_polymorphic(
- Employee, [Manager], aliased=True)
-
- q = s.query(engineer_employee, manager_employee).\
- join(
- manager_employee,
- and_(
- engineer_employee.id > manager_employee.id,
- engineer_employee.name == manager_employee.name
- )
+ engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True)
+ manager_employee = with_polymorphic(Employee, [Manager], aliased=True)
+
+ q = s.query(engineer_employee, manager_employee).join(
+ manager_employee,
+ and_(
+ engineer_employee.id > manager_employee.id,
+ engineer_employee.name == manager_employee.name,
+ ),
)
q.all()
{opensql}
.. sourcecode:: python+sql
- engineer_employee = with_polymorphic(
- Employee, [Engineer], flat=True)
- manager_employee = with_polymorphic(
- Employee, [Manager], flat=True)
-
- q = s.query(engineer_employee, manager_employee).\
- join(
- manager_employee,
- and_(
- engineer_employee.id > manager_employee.id,
- engineer_employee.name == manager_employee.name
- )
+ engineer_employee = with_polymorphic(Employee, [Engineer], flat=True)
+ manager_employee = with_polymorphic(Employee, [Manager], flat=True)
+
+ q = s.query(engineer_employee, manager_employee).join(
+ manager_employee,
+ and_(
+ engineer_employee.id > manager_employee.id,
+ engineer_employee.name == manager_employee.name,
+ ),
)
q.all()
{opensql}
eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager])
query = session.query(eng_plus_manager).filter(
- or_(
- eng_plus_manager.Engineer.engineer_info=='x',
- eng_plus_manager.Manager.manager_data=='y'
- )
- )
+ or_(
+ eng_plus_manager.Engineer.engineer_info == "x",
+ eng_plus_manager.Manager.manager_data == "y",
+ )
+ )
A query as above would generate SQL resembling the following:
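.. sourcecode:: sql

    -- assumed shape of the rendered statement, abbreviated
    SELECT employee.id, employee.name, employee.type,
           engineer.engineer_info, manager.manager_data
    FROM employee
        LEFT OUTER JOIN engineer ON employee.id = engineer.id
        LEFT OUTER JOIN manager ON employee.id = manager.id
    WHERE engineer.engineer_info = ? OR manager.manager_data = ?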
first introduced at :ref:`joined_inheritance`::
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(50))
__mapper_args__ = {
- 'polymorphic_identity':'employee',
- 'polymorphic_on':type,
- 'with_polymorphic': '*'
+ "polymorphic_identity": "employee",
+ "polymorphic_on": type,
+ "with_polymorphic": "*",
}
Above is a common setting for :paramref:`.mapper.with_polymorphic`,
default using the :paramref:`.mapper.polymorphic_load` parameter::
class Engineer(Employee):
- __tablename__ = 'engineer'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "engineer"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_info = Column(String(50))
- __mapper_args__ = {
- 'polymorphic_identity':'engineer',
- 'polymorphic_load': 'inline'
- }
+ __mapper_args__ = {"polymorphic_identity": "engineer", "polymorphic_load": "inline"}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_data = Column(String(50))
- __mapper_args__ = {
- 'polymorphic_identity':'manager',
- 'polymorphic_load': 'inline'
- }
+ __mapper_args__ = {"polymorphic_identity": "manager", "polymorphic_load": "inline"}
Setting the :paramref:`.mapper.polymorphic_load` parameter to the value
``"inline"`` means that the ``Engineer`` and ``Manager`` classes above
directly, rather than using an alias object. For simple cases it might be
considered to be more succinct::
- session.query(Employee).\
- with_polymorphic([Engineer, Manager]).\
- filter(
- or_(
- Engineer.engineer_info=='w',
- Manager.manager_data=='q'
- )
- )
+ session.query(Employee).with_polymorphic([Engineer, Manager]).filter(
+ or_(Engineer.engineer_info == "w", Manager.manager_data == "q")
+ )
The :meth:`_query.Query.with_polymorphic` method has a more complicated job
than the :func:`_orm.with_polymorphic` function, as it needs to correctly
using the value ``"selectin"`` on a per-subclass basis::
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(50))
- __mapper_args__ = {
- 'polymorphic_identity': 'employee',
- 'polymorphic_on': type
- }
+ __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type}
+
class Engineer(Employee):
- __tablename__ = 'engineer'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "engineer"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'selectin',
- 'polymorphic_identity': 'engineer',
+ "polymorphic_load": "selectin",
+ "polymorphic_identity": "engineer",
}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_name = Column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'selectin',
- 'polymorphic_identity': 'manager',
+ "polymorphic_load": "selectin",
+ "polymorphic_identity": "manager",
}
-
Unlike when using :func:`_orm.with_polymorphic`, when using the
:func:`_orm.selectin_polymorphic` style of loading, we do **not** have the
ability to refer to the ``Engineer`` or ``Manager`` entities within our main
from sqlalchemy.orm import selectin_polymorphic
query = session.query(Employee).options(
- selectin_polymorphic(Employee, [Manager, Engineer]),
- joinedload(Manager.paperwork)
+ selectin_polymorphic(Employee, [Manager, Engineer]), joinedload(Manager.paperwork)
)
Using the query above, we get three SELECT statements emitted, however
# use "Employee" example from the enclosing section
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_name = Column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'selectin',
- 'polymorphic_identity': 'manager',
+ "polymorphic_load": "selectin",
+ "polymorphic_identity": "manager",
}
+
class VicePresident(Manager):
vp_info = Column(String(30))
- __mapper_args__ = {
- "polymorphic_load": "inline",
- "polymorphic_identity": "vp"
- }
-
+ __mapper_args__ = {"polymorphic_load": "inline", "polymorphic_identity": "vp"}
Above, we add a ``vp_info`` column to the ``manager`` table, local to the
``VicePresident`` subclass. This subclass is linked to the polymorphic
manager_poly = with_polymorphic(Manager, [VicePresident])
- s.query(Employee).options(
- selectin_polymorphic(Employee, [manager_poly])).all()
+ s.query(Employee).options(selectin_polymorphic(Employee, [manager_poly])).all()
.. _inheritance_of_type:
.. sourcecode:: python
class Company(Base):
- __tablename__ = 'company'
+ __tablename__ = "company"
id = Column(Integer, primary_key=True)
name = Column(String(50))
- employees = relationship("Employee",
- backref='company')
+ employees = relationship("Employee", backref="company")
+
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
type = Column(String(20))
- company_id = Column(Integer, ForeignKey('company.id'))
+ company_id = Column(Integer, ForeignKey("company.id"))
__mapper_args__ = {
- 'polymorphic_on':type,
- 'polymorphic_identity':'employee',
+ "polymorphic_on": type,
+ "polymorphic_identity": "employee",
}
+
class Engineer(Employee):
- __tablename__ = 'engineer'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "engineer"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_info = Column(String(50))
- __mapper_args__ = {'polymorphic_identity':'engineer'}
+ __mapper_args__ = {"polymorphic_identity": "engineer"}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_data = Column(String(50))
- __mapper_args__ = {'polymorphic_identity':'manager'}
+ __mapper_args__ = {"polymorphic_identity": "manager"}
When querying from ``Company`` onto the ``Employee`` relationship, the
:meth:`_query.Query.join` method as well as operators like :meth:`.PropComparator.any`
against the set of columns representing the subclass using the
:meth:`~.orm.interfaces.PropComparator.of_type` operator::
- session.query(Company).\
- join(Company.employees.of_type(Engineer)).\
- filter(Engineer.engineer_info=='someinfo')
+ session.query(Company).join(Company.employees.of_type(Engineer)).filter(
+ Engineer.engineer_info == "someinfo"
+ )
Similarly, to join from ``Company`` to the polymorphic entity that includes both
``Engineer`` and ``Manager`` columns::
- manager_and_engineer = with_polymorphic(
- Employee, [Manager, Engineer])
+ manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer])
- session.query(Company).\
- join(Company.employees.of_type(manager_and_engineer)).\
- filter(
- or_(
- manager_and_engineer.Engineer.engineer_info == 'someinfo',
- manager_and_engineer.Manager.manager_data == 'somedata'
- )
+ session.query(Company).join(Company.employees.of_type(manager_and_engineer)).filter(
+ or_(
+ manager_and_engineer.Engineer.engineer_info == "someinfo",
+ manager_and_engineer.Manager.manager_data == "somedata",
)
+ )
The :meth:`.PropComparator.any` and :meth:`.PropComparator.has` operators also
can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`,
such as when the embedded criterion is in terms of a subclass::
- session.query(Company).\
- filter(
- Company.employees.of_type(Engineer).
- any(Engineer.engineer_info=='someinfo')
- ).all()
+ session.query(Company).filter(
+ Company.employees.of_type(Engineer).any(Engineer.engineer_info == "someinfo")
+ ).all()
.. _eagerloading_polymorphic_subtypes:
so that all sub-attributes of all referenced subtypes
can be loaded::
- manager_and_engineer = with_polymorphic(
- Employee, [Manager, Engineer],
- flat=True)
+ manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer], flat=True)
- session.query(Company).\
- options(
- joinedload(
- Company.employees.of_type(manager_and_engineer)
- )
- )
+ session.query(Company).options(
+ joinedload(Company.employees.of_type(manager_and_engineer))
+ )
.. note::
class Manager(Employee):
manager_data = Column(String(50))
- __mapper_args__ = {
- 'polymorphic_identity':'manager'
- }
+ __mapper_args__ = {"polymorphic_identity": "manager"}
Above, there would be no ``Employee.manager_data``
attribute, even though the ``employee`` table has a ``manager_data`` column.
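For example, a brief sketch of the consequence, using hypothetical ad-hoc
attribute access against the mapping above::

    Manager.manager_data  # works; the column is mapped on Manager only
    Employee.manager_data  # raises AttributeError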
of subclass attributes as well as specification of subclasses in a query,
just without the overhead of using OUTER JOIN::
- employee_poly = with_polymorphic(Employee, '*')
+ employee_poly = with_polymorphic(Employee, "*")
q = session.query(employee_poly).filter(
- or_(
- employee_poly.name == 'a',
- employee_poly.Manager.manager_data == 'b'
- )
+ or_(employee_poly.name == "a", employee_poly.Manager.manager_data == "b")
)
Above, our query remains against a single table however we can refer to the
Base = declarative_base()
+
class Customer(Base):
- __tablename__ = 'customer'
+ __tablename__ = "customer"
id = Column(Integer, primary_key=True)
name = Column(String)
billing_address = relationship("Address")
shipping_address = relationship("Address")
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = Column(Integer, primary_key=True)
street = Column(String)
city = Column(String)
the appropriate form is as follows::
class Customer(Base):
- __tablename__ = 'customer'
+ __tablename__ = "customer"
id = Column(Integer, primary_key=True)
name = Column(String)
Base = declarative_base()
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String)
- boston_addresses = relationship("Address",
- primaryjoin="and_(User.id==Address.user_id, "
- "Address.city=='Boston')")
+ boston_addresses = relationship(
+ "Address",
+ primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')",
+ )
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = Column(Integer, primary_key=True)
- user_id = Column(Integer, ForeignKey('user.id'))
+ user_id = Column(Integer, ForeignKey("user.id"))
street = Column(String)
city = Column(String)
Base = declarative_base()
+
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = Column(Integer, primary_key=True)
ip_address = Column(INET)
content = Column(String(50))
# relationship() using explicit foreign_keys, remote_side
- parent_host = relationship("HostEntry",
- primaryjoin=ip_address == cast(content, INET),
- foreign_keys=content,
- remote_side=ip_address
- )
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=ip_address == cast(content, INET),
+ foreign_keys=content,
+ remote_side=ip_address,
+ )
The above relationship will produce a join like::
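    -- assumed rendering, abbreviated to the relevant join condition
    SELECT host_entry.id, host_entry.ip_address, host_entry.content
    FROM host_entry
    JOIN host_entry AS host_entry_1
        ON host_entry_1.ip_address = CAST(host_entry.content AS INET)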
from sqlalchemy.orm import foreign, remote
+
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = Column(Integer, primary_key=True)
ip_address = Column(INET)
# relationship() using explicit foreign() and remote() annotations
# in lieu of separate arguments
- parent_host = relationship("HostEntry",
- primaryjoin=remote(ip_address) == \
- cast(foreign(content), INET),
- )
-
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=remote(ip_address) == cast(foreign(content), INET),
+ )
.. _relationship_custom_operator:
a :func:`_orm.relationship`::
class IPA(Base):
- __tablename__ = 'ip_address'
+ __tablename__ = "ip_address"
id = Column(Integer, primary_key=True)
v4address = Column(INET)
- network = relationship("Network",
- primaryjoin="IPA.v4address.bool_op('<<')"
- "(foreign(Network.v4representation))",
- viewonly=True
- )
+ network = relationship(
+ "Network",
+ primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))",
+ viewonly=True,
+ )
+
+
class Network(Base):
- __tablename__ = 'network'
+ __tablename__ = "network"
id = Column(Integer, primary_key=True)
v4representation = Column(CIDR)
from sqlalchemy import Column, Integer, func
from sqlalchemy.orm import relationship, foreign
+
class Polygon(Base):
__tablename__ = "polygon"
id = Column(Integer, primary_key=True)
viewonly=True,
)
+
class Point(Base):
__tablename__ = "point"
id = Column(Integer, primary_key=True)
``Article.magazine`` and ``Article.writer``::
class Magazine(Base):
- __tablename__ = 'magazine'
+ __tablename__ = "magazine"
id = Column(Integer, primary_key=True)
class Article(Base):
- __tablename__ = 'article'
+ __tablename__ = "article"
article_id = Column(Integer)
- magazine_id = Column(ForeignKey('magazine.id'))
+ magazine_id = Column(ForeignKey("magazine.id"))
writer_id = Column()
magazine = relationship("Magazine")
writer = relationship("Writer")
__table_args__ = (
- PrimaryKeyConstraint('article_id', 'magazine_id'),
+ PrimaryKeyConstraint("article_id", "magazine_id"),
ForeignKeyConstraint(
- ['writer_id', 'magazine_id'],
- ['writer.id', 'writer.magazine_id']
+ ["writer_id", "magazine_id"], ["writer.id", "writer.magazine_id"]
),
)
class Writer(Base):
- __tablename__ = 'writer'
+ __tablename__ = "writer"
id = Column(Integer, primary_key=True)
- magazine_id = Column(ForeignKey('magazine.id'), primary_key=True)
+ magazine_id = Column(ForeignKey("magazine.id"), primary_key=True)
magazine = relationship("Magazine")
When the above mapping is configured, we will see this warning emitted::
class Article(Base):
# ...
- writer = relationship("Writer", foreign_keys='Article.writer_id')
+ writer = relationship("Writer", foreign_keys="Article.writer_id")
However, this has the effect of ``Article.writer`` not taking
``Article.magazine_id`` into account when querying against ``Writer``:
writer = relationship(
"Writer",
primaryjoin="and_(Writer.id == foreign(Article.writer_id), "
- "Writer.magazine_id == Article.magazine_id)")
+ "Writer.magazine_id == Article.magazine_id)",
+ )
.. versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used
as the synchronization target from more than one relationship
we'll be dealing with collections so we keep things configured as "one to many"::
class Element(Base):
- __tablename__ = 'element'
+ __tablename__ = "element"
path = Column(String, primary_key=True)
- descendants = relationship('Element',
- primaryjoin=
- remote(foreign(path)).like(
- path.concat('/%')),
- viewonly=True,
- order_by=path)
+ descendants = relationship(
+ "Element",
+ primaryjoin=remote(foreign(path)).like(path.concat("/%")),
+ viewonly=True,
+ order_by=path,
+ )
Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``,
we would like a load of ``Element.descendants`` to look like::
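    -- assumed rendering of the LIKE-based criteria
    SELECT element.path AS element_path
    FROM element
    WHERE element.path LIKE ('/foo/bar2' || '/%')
    ORDER BY element.path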
Base = declarative_base()
- node_to_node = Table("node_to_node", Base.metadata,
+ node_to_node = Table(
+ "node_to_node",
+ Base.metadata,
Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True),
- Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True)
+ Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True),
)
+
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
label = Column(String)
- right_nodes = relationship("Node",
- secondary=node_to_node,
- primaryjoin=id==node_to_node.c.left_node_id,
- secondaryjoin=id==node_to_node.c.right_node_id,
- backref="left_nodes"
+ right_nodes = relationship(
+ "Node",
+ secondary=node_to_node,
+ primaryjoin=id == node_to_node.c.left_node_id,
+ secondaryjoin=id == node_to_node.c.right_node_id,
+ backref="left_nodes",
)
Where above, SQLAlchemy can't know automatically which columns should connect
use the string name of the table as it is present in the :class:`_schema.MetaData`::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
label = Column(String)
- right_nodes = relationship("Node",
- secondary="node_to_node",
- primaryjoin="Node.id==node_to_node.c.left_node_id",
- secondaryjoin="Node.id==node_to_node.c.right_node_id",
- backref="left_nodes"
+ right_nodes = relationship(
+ "Node",
+ secondary="node_to_node",
+ primaryjoin="Node.id==node_to_node.c.left_node_id",
+ secondaryjoin="Node.id==node_to_node.c.right_node_id",
+ backref="left_nodes",
)
.. warning:: When passed as a Python-evaluable string, the
metadata_obj = MetaData()
mapper_registry = registry()
- node_to_node = Table("node_to_node", metadata_obj,
+ node_to_node = Table(
+ "node_to_node",
+ metadata_obj,
Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True),
- Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True)
+ Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True),
)
- node = Table("node", metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('label', String)
+ node = Table(
+ "node",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("label", String),
)
+
+
class Node(object):
pass
- mapper_registry.map_imperatively(Node, node, properties={
- 'right_nodes':relationship(Node,
- secondary=node_to_node,
- primaryjoin=node.c.id==node_to_node.c.left_node_id,
- secondaryjoin=node.c.id==node_to_node.c.right_node_id,
- backref="left_nodes"
- )})
+ mapper_registry.map_imperatively(
+ Node,
+ node,
+ properties={
+ "right_nodes": relationship(
+ Node,
+ secondary=node_to_node,
+ primaryjoin=node.c.id == node_to_node.c.left_node_id,
+ secondaryjoin=node.c.id == node_to_node.c.right_node_id,
+ backref="left_nodes",
+ )
+ },
+ )
Note that in both examples, the :paramref:`_orm.relationship.backref`
keyword specifies a ``left_nodes`` backref - when
join condition (requires version 0.9.2 at least to function as is)::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
+ b_id = Column(ForeignKey("b.id"))
+
+ d = relationship(
+ "D",
+ secondary="join(B, D, B.d_id == D.id)." "join(C, C.d_id == D.id)",
+ primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)",
+ secondaryjoin="D.id == B.d_id",
+ uselist=False,
+ viewonly=True,
+ )
- d = relationship("D",
- secondary="join(B, D, B.d_id == D.id)."
- "join(C, C.d_id == D.id)",
- primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)",
- secondaryjoin="D.id == B.d_id",
- uselist=False,
- viewonly=True
- )
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- d_id = Column(ForeignKey('d.id'))
+ d_id = Column(ForeignKey("d.id"))
+
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- d_id = Column(ForeignKey('d.id'))
+ a_id = Column(ForeignKey("a.id"))
+ d_id = Column(ForeignKey("d.id"))
+
class D(Base):
- __tablename__ = 'd'
+ __tablename__ = "d"
id = Column(Integer, primary_key=True)
the rows in both ``A`` and ``B`` simultaneously::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
+ b_id = Column(ForeignKey("b.id"))
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
+
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
some_c_value = Column(String)
+
class D(Base):
- __tablename__ = 'd'
+ __tablename__ = "d"
id = Column(Integer, primary_key=True)
- c_id = Column(ForeignKey('c.id'))
- b_id = Column(ForeignKey('b.id'))
+ c_id = Column(ForeignKey("c.id"))
+ b_id = Column(ForeignKey("b.id"))
some_d_value = Column(String)
+
# 1. set up the join() as a variable, so we can refer
# to it in the mapping multiple times.
j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id)
.. sourcecode:: python+sql
(
- sess.query(A).join(A.b).
- filter(B_viacd_subquery.some_b_column == "some b").
- order_by(B_viacd_subquery.id)
+ sess.query(A)
+ .join(A.b)
+ .filter(B_viacd_subquery.some_b_column == "some b")
+ .order_by(B_viacd_subquery.id)
).all()
{opensql}SELECT a.id AS a_id, a.b_id AS a_b_id
ten items for each collection::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
+
partition = select(
- B,
- func.row_number().over(
- order_by=B.id, partition_by=B.a_id
- ).label('index')
+ B, func.row_number().over(order_by=B.id, partition_by=B.a_id).label("index")
).alias()
partitioned_b = aliased(B, partition)
A.partitioned_bs = relationship(
- partitioned_b,
- primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10)
+ partitioned_b, primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10)
)
We can use the above ``partitioned_bs`` relationship with most of the loader
strategies, such as :func:`.selectinload`::
for a1 in s.query(A).options(selectinload(A.partitioned_bs)):
- print(a1.partitioned_bs) # <-- will be no more than ten objects
+ print(a1.partitioned_bs) # <-- will be no more than ten objects
Where above, the "selectinload" query looks like:
.. sourcecode:: python
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
@property
.. seealso::
- :ref:`mapper_hybrids`
\ No newline at end of file
+ :ref:`mapper_hybrids`
:orphan:
-Moved! :doc:`/orm/loading_relationships`
\ No newline at end of file
+Moved! :doc:`/orm/loading_relationships`
from sqlalchemy.orm import deferred
from sqlalchemy import Integer, String, Text, Binary, Column
+
class Book(Base):
- __tablename__ = 'book'
+ __tablename__ = "book"
book_id = Column(Integer, primary_key=True)
title = Column(String(200), nullable=False)
Classical mappings as always place the usage of :func:`_orm.deferred` in the
``properties`` dictionary against the table-bound :class:`_schema.Column`::
- mapper_registry.map_imperatively(Book, book_table, properties={
- 'photo':deferred(book_table.c.photo)
- })
+ mapper_registry.map_imperatively(
+ Book, book_table, properties={"photo": deferred(book_table.c.photo)}
+ )
Deferred columns can be associated with a "group" name, so that they load
together when any of them are first accessed. The example below defines a
separately when it is accessed::
class Book(Base):
- __tablename__ = 'book'
+ __tablename__ = "book"
book_id = Column(Integer, primary_key=True)
title = Column(String(200), nullable=False)
summary = Column(String(2000))
excerpt = deferred(Column(Text))
- photo1 = deferred(Column(Binary), group='photos')
- photo2 = deferred(Column(Binary), group='photos')
- photo3 = deferred(Column(Binary), group='photos')
+ photo1 = deferred(Column(Binary), group="photos")
+ photo2 = deferred(Column(Binary), group="photos")
+ photo3 = deferred(Column(Binary), group="photos")
.. _deferred_options:
from sqlalchemy.orm import undefer
query = session.query(Book)
- query = query.options(defer('summary'), undefer('excerpt'))
+ query = query.options(defer("summary"), undefer("excerpt"))
query.all()
Above, the "summary" column will not load until accessed, and the "excerpt"
from sqlalchemy.orm import undefer_group
query = session.query(Book)
- query.options(undefer_group('photos')).all()
+ query.options(undefer_group("photos")).all()
.. _deferred_loading_w_multiple:
query = session.query(Author)
query = query.options(
- joinedload(Author.books).load_only(Book.summary, Book.excerpt),
- )
+ joinedload(Author.books).load_only(Book.summary, Book.excerpt),
+ )
Option structures as above can also be organized in more complex ways, such
as hierarchically using the :meth:`_orm.Load.options`
query = session.query(Author)
query = query.options(
- joinedload(Author.book).options(
- load_only(Book.summary, Book.excerpt),
- joinedload(Book.citations).options(
- joinedload(Citation.author),
- defer(Citation.fulltext)
- )
- )
- )
+ joinedload(Author.book).options(
+ load_only(Book.summary, Book.excerpt),
+ joinedload(Book.citations).options(
+ joinedload(Citation.author), defer(Citation.fulltext)
+ ),
+ )
+ )
.. versionadded:: 1.3.6 Added :meth:`_orm.Load.options` to allow easier
construction of hierarchies of loader options.
query = query.options(
joinedload(Author.book).load_only(Book.summary, Book.excerpt),
defaultload(Author.book).joinedload(Book.citations).joinedload(Citation.author),
- defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext)
+ defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext),
)
.. seealso::
from sqlalchemy.orm import defer
from sqlalchemy.orm import undefer
- session.query(Book).options(
- defer('*'), undefer("summary"), undefer("excerpt"))
+ session.query(Book).options(defer("*"), undefer("summary"), undefer("excerpt"))
Above, the :func:`.defer` option is applied using a wildcard to all column
attributes on the ``Book`` class. Then, the :func:`.undefer` option is used
from sqlalchemy.orm import Load
query = session.query(Book, Author).join(Book.author)
- query = query.options(
- Load(Book).load_only(Book.summary, Book.excerpt)
- )
+ query = query.options(Load(Book).load_only(Book.summary, Book.excerpt))
Above, :class:`_orm.Load` is used in conjunction with the exclusionary option
:func:`.load_only` so that the deferral of all other columns only takes
class Book(Base):
- __tablename__ = 'book'
+ __tablename__ = "book"
book_id = Column(Integer, primary_key=True)
title = Column(String(200), nullable=False)
summary = deferred(Column(String(2000)), raiseload=True)
excerpt = deferred(Column(Text), raiseload=True)
- book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first()
-
+ book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first()
Column Deferral API
-------------------
from sqlalchemy.orm import Bundle
- bn = Bundle('mybundle', MyClass.data1, MyClass.data2)
- for row in session.query(bn).filter(bn.c.data1 == 'd1'):
+ bn = Bundle("mybundle", MyClass.data1, MyClass.data2)
+ for row in session.query(bn).filter(bn.c.data1 == "d1"):
print(row.mybundle.data1, row.mybundle.data2)
The bundle can be subclassed to provide custom behaviors when results
from sqlalchemy.orm import Bundle
+
class DictBundle(Bundle):
def create_row_processor(self, query, procs, labels):
"""Override create_row_processor to return values as dictionaries"""
+
def proc(row):
- return dict(
- zip(labels, (proc(row) for proc in procs))
- )
+ return dict(zip(labels, (proc(row) for proc in procs)))
+
return proc
.. note::
A result from the above bundle will return dictionary values::
- bn = DictBundle('mybundle', MyClass.data1, MyClass.data2)
- for row in session.query(bn).filter(bn.c.data1 == 'd1'):
- print(row.mybundle['data1'], row.mybundle['data2'])
+ bn = DictBundle("mybundle", MyClass.data1, MyClass.data2)
+ for row in session.query(bn).filter(bn.c.data1 == "d1"):
+ print(row.mybundle["data1"], row.mybundle["data2"])
The :class:`.Bundle` construct is also integrated into the behavior
of :func:`.composite`, where it is used to return composite attributes as objects
the parent object is queried::
class Parent(Base):
- __tablename__ = 'parent'
+ __tablename__ = "parent"
id = Column(Integer, primary_key=True)
- children = relationship("Child", lazy='joined')
+ children = relationship("Child", lazy="joined")
Above, whenever a collection of ``Parent`` objects is loaded, each
``Parent`` will also have its ``children`` collection populated, using
to specify how loading should occur further levels deep::
session.query(Parent).options(
- joinedload(Parent.children).
- subqueryload(Child.subelements)).all()
+ joinedload(Parent.children).subqueryload(Child.subelements)
+ ).all()
Chained loader options can be applied against a "lazy" loaded collection.
This means that when a collection or association is lazily loaded upon
access, the specified option will then take effect::
session.query(Parent).options(
- lazyload(Parent.children).
- subqueryload(Child.subelements)).all()
+ lazyload(Parent.children).subqueryload(Child.subelements)
+ ).all()
Above, the query will return ``Parent`` objects without the ``children``
collections loaded. When the ``children`` collection on a particular
:term:`1.x style` queries. The options system is available as well for
:term:`2.0 style` queries using the :meth:`_sql.Select.options` method::
- stmt = select(Parent).options(
- lazyload(Parent.children).
- subqueryload(Child.subelements))
+ stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements))
result = session.execute(stmt)
stated. To navigate along a path without changing the existing loader style
of a particular attribute, the :func:`.defaultload` method/function may be used::
- session.query(A).options(
- defaultload(A.atob).
- joinedload(B.btoc)).all()
+ session.query(A).options(defaultload(A.atob).joinedload(B.btoc)).all()
A similar approach can be used to specify multiple sub-options at once, using
the :meth:`_orm.Load.options` method::
session.query(A).options(
- defaultload(A.atob).options(
- joinedload(B.btoc),
- joinedload(B.btod)
- )).all()
+ defaultload(A.atob).options(joinedload(B.btoc), joinedload(B.btod))
+ ).all()
.. versionadded:: 1.3.6 added :meth:`_orm.Load.options`
memory. For example, given the previous example::
session.query(Parent).options(
- lazyload(Parent.children).
- subqueryload(Child.subelements)).all()
+ lazyload(Parent.children).subqueryload(Child.subelements)
+ ).all()
if the ``children`` collection on a particular ``Parent`` object loaded by
the above query is expired (such as when a :class:`.Session` object's
# change the options on Parent objects that were already loaded
session.query(Parent).populate_existing().options(
- lazyload(Parent.children).
- lazyload(Child.subelements)).all()
+ lazyload(Parent.children).lazyload(Child.subelements)
+ ).all()
If the objects loaded above are fully cleared from the :class:`.Session`,
such as due to garbage collection or that :meth:`.Session.expunge_all`
raised::
from sqlalchemy.orm import raiseload
+
session.query(User).options(raiseload(User.addresses))
Above, a ``User`` object loaded from the above query will not have
indicate that all relationships should use this strategy. For example,
to set up only one attribute as eager loading, and all the rest as raise::
- session.query(Order).options(
- joinedload(Order.items), raiseload('*'))
+ session.query(Order).options(joinedload(Order.items), raiseload("*"))
The above wildcard will apply not only to all relationships on ``Order``
besides ``items``, but also to all those on the ``Item`` objects. To set up
from sqlalchemy.orm import Load
- session.query(Order).options(
- joinedload(Order.items), Load(Order).raiseload('*'))
+ session.query(Order).options(joinedload(Order.items), Load(Order).raiseload("*"))
Conversely, to set up the raise for just the ``Item`` objects::
- session.query(Order).options(
- joinedload(Order.items).raiseload('*'))
-
+ session.query(Order).options(joinedload(Order.items).raiseload("*"))
The :func:`.raiseload` option applies only to relationship attributes. For
column-oriented attributes, the :func:`.defer` option supports the
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... options(joinedload(User.addresses)).\
- ... filter_by(name='jack').all()
+ >>> jack = (
+ ... session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").all()
+ ... )
{opensql}SELECT
addresses_1.id AS addresses_1_id,
addresses_1.email_address AS addresses_1_email_address,
class Address(Base):
# ...
- user_id = Column(ForeignKey('users.id'), nullable=False)
+ user_id = Column(ForeignKey("users.id"), nullable=False)
user = relationship(User, lazy="joined", innerjoin=True)
At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag::
- session.query(Address).options(
- joinedload(Address.user, innerjoin=True))
+ session.query(Address).options(joinedload(Address.user, innerjoin=True))
The JOIN will right-nest itself when applied in a chain that includes
an OUTER JOIN:
.. sourcecode:: python+sql
>>> session.query(User).options(
- ... joinedload(User.addresses).
- ... joinedload(Address.widgets, innerjoin=True)).all()
+ ... joinedload(User.addresses).joinedload(Address.widgets, innerjoin=True)
+ ... ).all()
{opensql}SELECT
widgets_1.id AS widgets_1_id,
widgets_1.name AS widgets_1_name,
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... options(joinedload(User.addresses)).\
- ... filter(User.name=='jack').\
- ... order_by(Address.email_address).all()
+ >>> jack = (
+ ... session.query(User)
+ ... .options(joinedload(User.addresses))
+ ... .filter(User.name == "jack")
+ ... .order_by(Address.email_address)
+ ... .all()
+ ... )
{opensql}SELECT
addresses_1.id AS addresses_1_id,
addresses_1.email_address AS addresses_1_email_address,
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... join(User.addresses).\
- ... filter(User.name=='jack').\
- ... order_by(Address.email_address).all()
+ >>> jack = (
+ ... session.query(User)
+ ... .join(User.addresses)
+ ... .filter(User.name == "jack")
+ ... .order_by(Address.email_address)
+ ... .all()
+ ... )
{opensql}
SELECT
users.id AS users_id,
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... join(User.addresses).\
- ... options(joinedload(User.addresses)).\
- ... filter(User.name=='jack').\
- ... order_by(Address.email_address).all()
+ >>> jack = (
+ ... session.query(User)
+ ... .join(User.addresses)
+ ... .options(joinedload(User.addresses))
+ ... .filter(User.name == "jack")
+ ... .order_by(Address.email_address)
+ ... .all()
+ ... )
{opensql}SELECT
addresses_1.id AS addresses_1_id,
addresses_1.email_address AS addresses_1_email_address,
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... join(User.addresses).\
- ... options(joinedload(User.addresses)).\
- ... filter(User.name=='jack').\
- ... filter(Address.email_address=='someaddress@foo.com').\
- ... all()
+ >>> jack = (
+ ... session.query(User)
+ ... .join(User.addresses)
+ ... .options(joinedload(User.addresses))
+ ... .filter(User.name == "jack")
+ ... .filter(Address.email_address == "someaddress@foo.com")
+ ... .all()
+ ... )
{opensql}SELECT
addresses_1.id AS addresses_1_id,
addresses_1.email_address AS addresses_1_email_address,
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... join(User.addresses).\
- ... options(subqueryload(User.addresses)).\
- ... filter(User.name=='jack').\
- ... filter(Address.email_address=='someaddress@foo.com').\
- ... all()
+ >>> jack = (
+ ... session.query(User)
+ ... .join(User.addresses)
+ ... .options(subqueryload(User.addresses))
+ ... .filter(User.name == "jack")
+ ... .filter(Address.email_address == "someaddress@foo.com")
+ ... .all()
+ ... )
{opensql}SELECT
users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... options(subqueryload(User.addresses)).\
- ... filter_by(name='jack').all()
+ >>> jack = (
+ ... session.query(User)
+ ... .options(subqueryload(User.addresses))
+ ... .filter_by(name="jack")
+ ... .all()
+ ... )
{opensql}SELECT
users.id AS users_id,
users.name AS users_name,
that the inner query could return the wrong rows::
# incorrect, no ORDER BY
- session.query(User).options(
- subqueryload(User.addresses)).first()
+ session.query(User).options(subqueryload(User.addresses)).first()
# incorrect if User.name is not unique
- session.query(User).options(
- subqueryload(User.addresses)
- ).order_by(User.name).first()
+ session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first()
# correct
- session.query(User).options(
- subqueryload(User.addresses)
- ).order_by(User.name, User.id).first()
+ session.query(User).options(subqueryload(User.addresses)).order_by(
+ User.name, User.id
+ ).first()
.. seealso::
.. sourcecode:: python+sql
- >>> jack = session.query(User).\
- ... options(selectinload(User.addresses)).\
- ... filter(or_(User.name == 'jack', User.name == 'ed')).all()
+ >>> jack = (
+ ... session.query(User)
+ ... .options(selectinload(User.addresses))
+ ... .filter(or_(User.name == "jack", User.name == "ed"))
+ ... .all()
+ ... )
{opensql}SELECT
users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- >>> session.query(Address).\
- ... options(selectinload(Address.user)).all()
+ >>> session.query(Address).options(selectinload(Address.user)).all()
{opensql}SELECT
addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
specified in the :class:`_query.Query`. This feature is available by passing
the string ``'*'`` as the argument to any of these options::
- session.query(MyClass).options(lazyload('*'))
+ session.query(MyClass).options(lazyload("*"))
Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting
of all :func:`_orm.relationship` constructs in use for that query,
:func:`.subqueryload`, etc. The query below will still use joined loading
for the ``widget`` relationship::
- session.query(MyClass).options(
- lazyload('*'),
- joinedload(MyClass.widget)
- )
+ session.query(MyClass).options(lazyload("*"), joinedload(MyClass.widget))
If multiple ``'*'`` options are passed, the last one overrides
those previously passed.
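As an illustrative sketch, assuming the wildcard precedence described above::

    # the later joinedload("*") overrides the earlier lazyload("*")
    session.query(MyClass).options(lazyload("*"), joinedload("*"))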
by first applying the :class:`_orm.Load` object, then specifying the ``*`` as a
chained option::
- session.query(User, Address).options(
- Load(Address).lazyload('*'))
+ session.query(User, Address).options(Load(Address).lazyload("*"))
Above, all relationships on ``Address`` will be set to a lazy load.
and additionally establish this as the basis for eager loading of ``User.addresses``::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
addresses = relationship("Address")
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
# ...
- q = session.query(User).join(User.addresses).\
- options(contains_eager(User.addresses))
+ q = session.query(User).join(User.addresses).options(contains_eager(User.addresses))
If the "eager" portion of the statement is "aliased", the path
should be specified using :meth:`.PropComparator.of_type`, which allows
adalias = aliased(Address)
# construct a Query object which expects the "addresses" results
- query = session.query(User).\
- outerjoin(User.addresses.of_type(adalias)).\
- options(contains_eager(User.addresses.of_type(adalias)))
+ query = (
+ session.query(User)
+ .outerjoin(User.addresses.of_type(adalias))
+ .options(contains_eager(User.addresses.of_type(adalias)))
+ )
# get results normally
r = query.all()
to be a full path from the starting entity. For example, if we were loading
``Users->orders->Order->items->Item``, the option would be used as::
- query(User).options(
- contains_eager(User.orders).
- contains_eager(Order.items))
+ query(User).options(contains_eager(User.orders).contains_eager(Order.items))
Using contains_eager() to load a custom-filtered collection result
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:meth:`_query.Query.populate_existing` to ensure any already-loaded collections
are overwritten::
- q = session.query(User).\
- join(User.addresses).\
- filter(Address.email_address.like('%@aol.com')).\
- options(contains_eager(User.addresses)).\
- populate_existing()
+ q = (
+ session.query(User)
+ .join(User.addresses)
+ .filter(Address.email_address.like("%@aol.com"))
+ .options(contains_eager(User.addresses))
+ .populate_existing()
+ )
The above query will load only ``User`` objects which contain at
least one ``Address`` object that contains the substring ``'aol.com'`` in its
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
- b = relationship(
- "B",
- backref=backref("a", uselist=False),
- lazy='joined')
+ b_id = Column(ForeignKey("b.id"))
+ b = relationship("B", backref=backref("a", uselist=False), lazy="joined")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
-
If we query for an ``A`` row, and then ask it for ``a.b.a``, we will get
an extra SELECT::
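    >>> a1.b.a
    SELECT a.id AS a_id, a.b_id AS a_b_id
    FROM a
    WHERE ? = a.b_id

(an approximate rendering of the lazy load emitted for the backref)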
from sqlalchemy import event
from sqlalchemy.orm import attributes
+
@event.listens_for(A, "load")
def load_b(target, context):
- if 'b' in target.__dict__:
- attributes.set_committed_value(target.b, 'a', target)
+ if "b" in target.__dict__:
+ attributes.set_committed_value(target.b, "a", target)
Now when we query for ``A``, we will get ``A.b`` from the joined eager load,
and ``A.b.a`` from our event:
(1, 0)
{stop}assert a1.b.a is a1
-
Relationship Loader API
-----------------------
from sqlalchemy.orm import validates
+
class EmailAddress(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = Column(Integer, primary_key=True)
email = Column(String)
- @validates('email')
+ @validates("email")
def validate_email(self, key, address):
- if '@' not in address:
+ if "@" not in address:
raise ValueError("failed simple email validation")
return address
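A short sketch of the validator in action, using hypothetical values::

    address = EmailAddress(email="good@example.com")  # passes validation
    address.email = "not-an-email"  # raises ValueError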
from sqlalchemy.orm import validates
+
class User(Base):
# ...
addresses = relationship("Address")
- @validates('addresses')
+ @validates("addresses")
def validate_address(self, key, address):
- if '@' not in address.email:
+ if "@" not in address.email:
raise ValueError("failed simplified email validation")
return address
-
The validation function by default does not get emitted for collection
remove events, as the typical expectation is that a value being discarded
doesn't require validation. However, :func:`.validates` supports reception
from sqlalchemy.orm import validates
+
class User(Base):
# ...
addresses = relationship("Address")
- @validates('addresses', include_removes=True)
+ @validates("addresses", include_removes=True)
def validate_address(self, key, address, is_remove):
if is_remove:
- raise ValueError(
- "not allowed to remove items from the collection")
+ raise ValueError("not allowed to remove items from the collection")
else:
- if '@' not in address.email:
+ if "@" not in address.email:
raise ValueError("failed simplified email validation")
return address
from sqlalchemy.orm import validates
+
class User(Base):
# ...
- addresses = relationship("Address", backref='user')
+ addresses = relationship("Address", backref="user")
- @validates('addresses', include_backrefs=False)
+ @validates("addresses", include_backrefs=False)
def validate_address(self, key, address):
- if '@' not in address:
+ if "@" not in address:
raise ValueError("failed simplified email validation")
return address
different name. Below we illustrate this using Python 2.6-style properties::
class EmailAddress(Base):
- __tablename__ = 'email_address'
+ __tablename__ = "email_address"
id = Column(Integer, primary_key=True)
from sqlalchemy.ext.hybrid import hybrid_property
+
class EmailAddress(Base):
- __tablename__ = 'email_address'
+ __tablename__ = "email_address"
id = Column(Integer, primary_key=True)
.. sourcecode:: python+sql
from sqlalchemy.orm import Session
+
session = Session()
{sql}address = session.query(EmailAddress).\
FROM address
WHERE address.email = ?
('address@example.com',)
- {stop}
- address.email = 'otheraddress@example.com'
+ address.email = "otheraddress@example.com"
{sql}session.commit()
UPDATE address SET email=? WHERE address.id = ?
('otheraddress@example.com', 1)
COMMIT
- {stop}
The :class:`~.hybrid_property` also allows us to change the behavior of the
attribute, including defining separate behaviors when the attribute is
logic::
class EmailAddress(Base):
- __tablename__ = 'email_address'
+ __tablename__ = "email_address"
id = Column(Integer, primary_key=True)
FROM address
WHERE substr(address.email, ?, length(address.email) - ?) = ?
(0, 12, 'address')
- {stop}
Read more about Hybrids at :ref:`hybrids_toplevel`.
attribute available by an additional name::
from sqlalchemy.orm import synonym
-
+
+
class MyClass(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
``.status`` that will behave as one attribute, both at the expression
level::
- >>> print(MyClass.job_status == 'some_status')
+ >>> print(MyClass.job_status == "some_status")
my_table.job_status = :job_status_1
- >>> print(MyClass.status == 'some_status')
+ >>> print(MyClass.status == "some_status")
my_table.job_status = :job_status_1
and at the instance level::
- >>> m1 = MyClass(status='x')
+ >>> m1 = MyClass(status="x")
>>> m1.status, m1.job_status
('x', 'x')
- >>> m1.job_status = 'y'
+ >>> m1.job_status = "y"
>>> m1.status, m1.job_status
('y', 'y')
``status`` synonym with a ``@property``::
class MyClass(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
status = Column(String(50))
from sqlalchemy.ext.declarative import synonym_for
+
class MyClass(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
status = Column(String(50))
from sqlalchemy.ext.hybrid import hybrid_property
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql import case
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
@fullname.expression
def fullname(cls):
- return case([
- (cls.firstname != None, cls.firstname + " " + cls.lastname),
- ], else_ = cls.lastname)
+ return case(
+ [
+ (cls.firstname != None, cls.firstname + " " + cls.lastname),
+ ],
+ else_=cls.lastname,
+ )
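As a usage sketch, assuming the mapping above, the hybrid renders the CASE
construct inside the statement's WHERE clause::

    session.query(User).filter(User.fullname == "ed jones")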
.. _mapper_column_property_sql_expressions:
from sqlalchemy.orm import column_property
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
Base = declarative_base()
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = Column(Integer, primary_key=True)
- user_id = Column(Integer, ForeignKey('user.id'))
+ user_id = Column(Integer, ForeignKey("user.id"))
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
address_count = column_property(
- select(func.count(Address.id)).
- where(Address.user_id==id).
- correlate_except(Address).
- scalar_subquery()
+ select(func.count(Address.id))
+ .where(Address.user_id == id)
+ .correlate_except(Address)
+ .scalar_subquery()
)
In the above example, we define a :func:`_expression.ScalarSelect` construct like the following::
stmt = (
- select(func.count(Address.id)).
- where(Address.user_id==id).
- correlate_except(Address).
- scalar_subquery()
+ select(func.count(Address.id))
+ .where(Address.user_id == id)
+ .correlate_except(Address)
+ .scalar_subquery()
)
Above, we first use :func:`_sql.select` to create a :class:`_sql.Select`
# only works if a declarative base class is in use
User.address_count = column_property(
- select(func.count(Address.id)).
- where(Address.user_id==User.id).
- scalar_subquery()
+ select(func.count(Address.id)).where(Address.user_id == User.id).scalar_subquery()
)
When using mapping styles that don't use :func:`_orm.declarative_base`,
reg = registry()
+
@reg.mapped
class User:
- __tablename__ = 'user'
+ __tablename__ = "user"
# ... additional mapping directives
# works for any kind of mapping
from sqlalchemy import inspect
+
inspect(User).add_property(
column_property(
- select(func.count(Address.id)).
- where(Address.user_id==User.id).
- scalar_subquery()
+ select(func.count(Address.id))
+ .where(Address.user_id == User.id)
+ .scalar_subquery()
)
)
from sqlalchemy import and_
+
class Author(Base):
# ...
book_count = column_property(
- select(func.count(books.c.id)
- ).where(
+ select(func.count(books.c.id))
+ .where(
and_(
- book_authors.c.author_id==authors.c.id,
- book_authors.c.book_id==books.c.id
+ book_authors.c.author_id == authors.c.id,
+ book_authors.c.book_id == books.c.id,
)
- ).scalar_subquery()
+ )
+ .scalar_subquery()
)
.. _mapper_column_property_sql_expressions_composed:
class File(Base):
- __tablename__ = 'file'
+ __tablename__ = "file"
id = Column(Integer, primary_key=True)
name = Column(String(64))
extension = Column(String(8))
- filename = column_property(name + '.' + extension)
- path = column_property('C:/' + filename.expression)
+ filename = column_property(name + "." + extension)
+ path = column_property("C:/" + filename.expression)
When the ``File`` class is used in expressions normally, the attributes
assigned to ``filename`` and ``path`` are usable directly. The use of the
:attr:`.ColumnProperty.expression` attribute is only necessary when using
the :class:`.ColumnProperty` directly within the mapping definition::
- q = session.query(File.path).filter(File.filename == 'foo.txt')
-
+ q = session.query(File.path).filter(File.filename == "foo.txt")
Using a plain descriptor
------------------------
from sqlalchemy.orm import object_session
from sqlalchemy import select, func
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
@property
def address_count(self):
- return object_session(self).\
- scalar(
- select(func.count(Address.id)).\
- where(Address.user_id==self.id)
- )
+ return object_session(self).scalar(
+ select(func.count(Address.id)).where(Address.user_id == self.id)
+ )
The plain descriptor approach is useful as a last resort, but is less performant
in the usual case than both the hybrid and column property approaches, in that
from sqlalchemy.orm import query_expression
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
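    expr = query_expression()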
SQL expression to be populated into ``A.expr``::
from sqlalchemy.orm import with_expression
- q = session.query(A).options(
- with_expression(A.expr, A.x + A.y))
+
+ q = session.query(A).options(with_expression(A.expr, A.x + A.y))
The :func:`.query_expression` mapping has these caveats:
To ensure the attribute is re-loaded, use :meth:`_orm.Query.populate_existing`::
- obj = session.query(A).populate_existing().options(
- with_expression(A.expr, some_expr)).first()
+ obj = (
+ session.query(A)
+ .populate_existing()
+ .options(with_expression(A.expr, some_expr))
+ .first()
+ )
* The query_expression value **does not refresh when the object is
expired**. Once the object is expired, either via :meth:`.Session.expire`
ad-hoc expression; that is, this won't work::
# won't work
- q = session.query(A).options(
- with_expression(A.expr, A.x + A.y)
- ).filter(A.expr > 5).order_by(A.expr)
+ q = (
+ session.query(A)
+ .options(with_expression(A.expr, A.x + A.y))
+ .filter(A.expr > 5)
+ .order_by(A.expr)
+ )
The ``A.expr`` expression will resolve to NULL in the above WHERE clause
and ORDER BY clause. To use the expression throughout the query, assign to a
variable and use that::
a_expr = A.x + A.y
- q = session.query(A).options(
- with_expression(A.expr, a_expr)
- ).filter(a_expr > 5).order_by(a_expr)
+ q = (
+ session.query(A)
+ .options(with_expression(A.expr, a_expr))
+ .filter(a_expr > 5)
+ .order_by(a_expr)
+ )
.. versionadded:: 1.2
it that way, as we illustrate here in a Declarative mapping::
class User(Base):
- __tablename__ = 'user'
- id = Column('user_id', Integer, primary_key=True)
- name = Column('user_name', String(50))
+ __tablename__ = "user"
+ id = Column("user_id", Integer, primary_key=True)
+ name = Column("user_name", String(50))
Where above ``User.id`` resolves to a column named ``user_id``
and ``User.name`` resolves to a column named ``user_name``.
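For example, filtering on ``User.name`` renders the mapped ``user_name``
column in the SQL (a brief sketch, assuming an active ``session``)::

    # renders "user_name" in the WHERE clause, not "name"
    session.query(User).filter(User.name == "ed")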
to place the column in the :paramref:`_orm.mapper.properties`
dictionary, using the desired attribute name as the key::
- mapper_registry.map_imperatively(User, user_table, properties={
- 'id': user_table.c.user_id,
- 'name': user_table.c.user_name,
- })
-
+ mapper_registry.map_imperatively(
+ User,
+ user_table,
+ properties={
+ "id": user_table.c.user_id,
+ "name": user_table.c.user_name,
+ },
+ )
.. _mapper_automated_reflection_schemes:
@event.listens_for(Base.metadata, "column_reflect")
def column_reflect(inspector, table, column_info):
# set column.key = "attr_<lower_case_name>"
- column_info['key'] = "attr_%s" % column_info['name'].lower()
+ column_info["key"] = "attr_%s" % column_info["name"].lower()
With the above event in place, the reflection of :class:`_schema.Column` objects will be
intercepted by our handler, which adds a new ``.key`` element, such as in the mapping below::
class MyClass(Base):
- __table__ = Table("some_table", Base.metadata,
- autoload_with=some_engine)
+ __table__ = Table("some_table", Base.metadata, autoload_with=some_engine)
The approach also works with both the :class:`.DeferredReflection` base class
and the :ref:`automap_toplevel` extension. For automap
from sqlalchemy.orm import column_property
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = column_property(Column(String(50)), active_history=True)
columns::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
metadata,
Column("user_id", String(40), nullable=False),
Column("group_id", String(40), nullable=False),
- UniqueConstraint("user_id", "group_id")
+ UniqueConstraint("user_id", "group_id"),
)
class GroupUsers(Base):
__table__ = group_users
- __mapper_args__ = {
- "primary_key": [group_users.c.user_id, group_users.c.group_id]
- }
+ __mapper_args__ = {"primary_key": [group_users.c.user_id, group_users.c.group_id]}
Above, the ``group_users`` table is an association table of some kind
with string columns ``user_id`` and ``group_id``, but no primary key is set up;
class User(Base):
__table__ = user_table
- __mapper_args__ = {
- 'include_properties' :['user_id', 'user_name']
- }
+ __mapper_args__ = {"include_properties": ["user_id", "user_name"]}
...will map the ``User`` class to the ``user_table`` table, only including
the ``user_id`` and ``user_name`` columns - the rest are not referenced.
class Address(Base):
__table__ = address_table
- __mapper_args__ = {
- 'exclude_properties' : ['street', 'city', 'state', 'zip']
- }
+ __mapper_args__ = {"exclude_properties": ["street", "city", "state", "zip"]}
...will map the ``Address`` class to the ``address_table`` table, including
all columns present except ``street``, ``city``, ``state``, and ``zip``.
class UserAddress(Base):
__table__ = user_table.join(address_table)
__mapper_args__ = {
- 'exclude_properties' :[address_table.c.id],
- 'primary_key' : [user_table.c.id]
+ "exclude_properties": [address_table.c.id],
+ "primary_key": [user_table.c.id],
}
.. note::
# an example mapping using the base
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String)
mapper_registry = registry()
user_table = Table(
- 'user',
+ "user",
mapper_registry.metadata,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(12))
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(12)),
)
+
class User:
pass
- mapper_registry.map_imperatively(User, user_table)
+ mapper_registry.map_imperatively(User, user_table)
Information about mapped attributes, such as relationships to other classes, is provided
via the ``properties`` dictionary. The example below illustrates a second :class:`_schema.Table`
object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`::
- address = Table('address', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String(50))
- )
+ address = Table(
+ "address",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String(50)),
+ )
- mapper_registry.map_imperatively(User, user, properties={
- 'addresses' : relationship(Address, backref='user', order_by=address.c.id)
- })
+ mapper_registry.map_imperatively(
+ User,
+ user,
+ properties={
+ "addresses": relationship(Address, backref="user", order_by=address.c.id)
+ },
+ )
mapper_registry.map_imperatively(Address, address)
Base = declarative_base()
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(...)
name = Column(...)
An object of type ``User`` above will have a constructor which allows
``User`` objects to be created as::
- u1 = User(name='some name', fullname='some fullname')
+ u1 = User(name="some name", fullname="some fullname")
The above constructor may be customized by passing a Python callable to
the :paramref:`_orm.registry.constructor` parameter which provides the
mapper_registry = registry()
user_table = Table(
- 'user',
+ "user",
mapper_registry.metadata,
- Column('id', Integer, primary_key=True),
- Column('name', String(50))
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
)
+
class User:
pass
+
mapper_registry.map_imperatively(User, user_table)
The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`,
>>> insp.attrs.nickname.value
'nickname'
- >>> u1.nickname = 'new nickname'
+ >>> u1.nickname = "new nickname"
>>> insp.attrs.nickname.history
History(added=['new nickname'], unchanged=(), deleted=['nickname'])
multiple tables, complete with its own composite primary key, which can be
mapped in the same way as a :class:`_schema.Table`::
- from sqlalchemy import Table, Column, Integer, \
- String, MetaData, join, ForeignKey
+ from sqlalchemy import Table, Column, Integer, String, MetaData, join, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import column_property
metadata_obj = MetaData()
# define two Table objects
- user_table = Table('user', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String),
- )
-
- address_table = Table('address', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String)
- )
+ user_table = Table(
+ "user",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("name", String),
+ )
+
+ address_table = Table(
+ "address",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String),
+ )
# define a join between them. This
# takes place across the user.id and address.user_id
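    # columns (a sketch completing the elided statement; the variable
    # name "user_address_join" is illustrative)
    user_address_join = join(
        user_table, address_table, user_table.c.id == address_table.c.user_id
    )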
from sqlalchemy import event
- @event.listens_for(PtoQ, 'before_update')
+
+ @event.listens_for(PtoQ, "before_update")
def receive_before_update(mapper, connection, target):
- if target.some_required_attr_on_q is None:
+ if target.some_required_attr_on_q is None:
connection.execute(q_table.insert(), {"id": target.id})
where above, a row is INSERTed into the ``q_table`` table by creating an
from sqlalchemy import select, func
- subq = select(
- func.count(orders.c.id).label('order_count'),
- func.max(orders.c.price).label('highest_order'),
- orders.c.customer_id
- ).group_by(orders.c.customer_id).subquery()
+ subq = (
+ select(
+ func.count(orders.c.id).label("order_count"),
+ func.max(orders.c.price).label("highest_order"),
+ orders.c.customer_id,
+ )
+ .group_by(orders.c.customer_id)
+ .subquery()
+ )
- customer_select = select(customers, subq).join_from(
- customers, subq, customers.c.id == subq.c.customer_id
- ).subquery()
+
+ customer_select = (
+ select(customers, subq)
+ .join_from(customers, subq, customers.c.id == subq.c.customer_id)
+ .subquery()
+ )
class Customer(Base):
__table__ = customer_select
value = Column(Integer)
+
someobject = session.query(SomeClass).get(5)
# set 'value' attribute to a SQL expression adding one
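    someobject.value = SomeClass.value + 1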
session = Session()
# execute a string statement
- result = session.execute("select * from table where id=:id", {'id':7})
+ result = session.execute("select * from table where id=:id", {"id": 7})
# execute a SQL expression construct
- result = session.execute(select(mytable).where(mytable.c.id==7))
+ result = session.execute(select(mytable).where(mytable.c.id == 7))
The current :class:`~sqlalchemy.engine.Connection` held by the
:class:`~sqlalchemy.orm.session.Session` is accessible using the
:meth:`~.Session.connection` method::
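    connection = session.connection()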
# need to specify mapper or class when executing
result = session.execute(
text("select * from table where id=:id"),
- {'id':7},
- bind_arguments={'mapper': MyMappedClass}
+ {"id": 7},
+ bind_arguments={"mapper": MyMappedClass},
)
result = session.execute(
- select(mytable).where(mytable.c.id==7),
- bind_arguments={'mapper': MyMappedClass}
+ select(mytable).where(mytable.c.id == 7), bind_arguments={"mapper": MyMappedClass}
)
connection = session.connection(MyMappedClass)
"default" case; the attribute will be omitted from the INSERT statement::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
data = Column(String(50), nullable=True)
+
obj = MyObject(id=1)
session.add(obj)
session.commit() # INSERT with the 'data' column omitted; the database
- # itself will persist this as the NULL value
+ # itself will persist this as the NULL value
Omitting a column from the INSERT means that the column will
have the NULL value set, *unless* the column has a default set up,
defaults::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
data = Column(String(50), nullable=True, server_default="default")
+
obj = MyObject(id=1)
session.add(obj)
session.commit() # INSERT with the 'data' column omitted; the database
- # itself will persist this as the value 'default'
+ # itself will persist this as the value 'default'
However, in the ORM, even if one assigns the Python value ``None`` explicitly
to the object, this is treated the **same** as though the value were never
assigned::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
data = Column(String(50), nullable=True, server_default="default")
+
obj = MyObject(id=1, data=None)
session.add(obj)
session.commit() # INSERT with the 'data' column explicitly set to None;
- # the ORM still omits it from the statement and the
- # database will still persist this as the value 'default'
+ # the ORM still omits it from the statement and the
+ # database will still persist this as the value 'default'
The above operation will persist into the ``data`` column the
server default value of ``"default"`` and not SQL NULL, even though ``None``
obj = MyObject(id=1, data=null())
session.add(obj)
session.commit() # INSERT with the 'data' column explicitly set as null();
- # the ORM uses this directly, bypassing all client-
- # and server-side defaults, and the database will
- # persist this as the NULL value
+ # the ORM uses this directly, bypassing all client-
+ # and server-side defaults, and the database will
+ # persist this as the NULL value
The :obj:`_expression.null` SQL construct always translates into the SQL
NULL value being directly present in the target INSERT statement.
value and pass it through, rather than omitting it as a "missing" value::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
data = Column(
- String(50).evaluates_none(), # indicate that None should always be passed
- nullable=True, server_default="default")
+ String(50).evaluates_none(), # indicate that None should always be passed
+ nullable=True,
+ server_default="default",
+ )
+
obj = MyObject(id=1, data=None)
session.add(obj)
session.commit() # INSERT with the 'data' column explicitly set to None;
- # the ORM uses this directly, bypassing all client-
- # and server-side defaults, and the database will
- # persist this as the NULL value
+ # the ORM uses this directly, bypassing all client-
+ # and server-side defaults, and the database will
+ # persist this as the NULL value
.. topic:: Evaluating None
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime(), server_default=func.now())
:paramref:`.orm.mapper.eager_defaults`::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime(), server_default=func.now())
the :class:`.Sequence` construct::
class MyOracleModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, Sequence("my_sequence"), primary_key=True)
data = Column(String(50))
SQL Server TIMESTAMP column as the primary key, which generates values automatically::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
timestamp = Column(TIMESTAMP(), server_default=FetchedValue(), primary_key=True)
pre-execute-supported default using the "NOW()" SQL function::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
timestamp = Column(DateTime(), default=func.now(), primary_key=True)
from sqlalchemy import cast, Binary
+
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
- timestamp = Column(
- TIMESTAMP(),
- default=cast(func.now(), Binary),
- primary_key=True)
+ timestamp = Column(TIMESTAMP(), default=cast(func.now(), Binary), primary_key=True)
Above, in addition to selecting the "NOW()" function, we additionally make
use of the :class:`.Binary` datatype in conjunction with :func:`.cast` so that
by passing this as the ``type_`` parameter::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
timestamp = Column(
DateTime,
- default=func.datetime('now', 'localtime', type_=DateTime),
- primary_key=True)
+ default=func.datetime("now", "localtime", type_=DateTime),
+ primary_key=True,
+ )
The above mapping upon INSERT will look like:
to ensure that the fetch occurs::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = Column(Integer, primary_key=True)
created = Column(DateTime(), default=func.now(), server_default=FetchedValue())
- updated = Column(DateTime(), onupdate=func.now(), server_default=FetchedValue(), server_onupdate=FetchedValue())
+ updated = Column(
+ DateTime(),
+ onupdate=func.now(),
+ server_default=FetchedValue(),
+ server_onupdate=FetchedValue(),
+ )
__mapper_args__ = {"eager_defaults": True}
from sqlalchemy import update
- stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
- returning(User.id)
+ stmt = (
+ update(User)
+ .where(User.name == "squidward")
+ .values(name="spongebob")
+ .returning(User.id)
+ )
for row in session.execute(stmt):
print(f"id: {row.id}")
statement in an ORM context using the :meth:`_sql.Select.from_statement`
method::
- stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
- returning(User)
+ stmt = (
+ update(User)
+ .where(User.name == "squidward")
+ .values(name="spongebob")
+ .returning(User)
+ )
orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True)
index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname)
).returning(User)
- orm_stmt = (
- select(User)
- .from_statement(stmt)
- .execution_options(populate_existing=True)
- )
+ orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True)
for user in session.execute(
orm_stmt,
).scalars():
emit SQL on behalf of a particular kind of mapped class in order to locate
the appropriate source of database connectivity::
- engine1 = create_engine('postgresql://db1')
- engine2 = create_engine('postgresql://db2')
+ engine1 = create_engine("postgresql://db1")
+ engine2 = create_engine("postgresql://db2")
Session = sessionmaker()
# bind User operations to engine 1, Account operations to engine 2
- Session.configure(binds={User:engine1, Account:engine2})
+ Session.configure(binds={User: engine1, Account: engine2})
session = Session()
::
engines = {
- 'leader':create_engine("sqlite:///leader.db"),
- 'other':create_engine("sqlite:///other.db"),
- 'follower1':create_engine("sqlite:///follower1.db"),
- 'follower2':create_engine("sqlite:///follower2.db"),
+ "leader": create_engine("sqlite:///leader.db"),
+ "other": create_engine("sqlite:///other.db"),
+ "follower1": create_engine("sqlite:///follower1.db"),
+ "follower2": create_engine("sqlite:///follower2.db"),
}
from sqlalchemy.sql import Update, Delete
from sqlalchemy.orm import Session, sessionmaker
import random
+
class RoutingSession(Session):
def get_bind(self, mapper=None, clause=None):
if mapper and issubclass(mapper.class_, MyOtherClass):
- return engines['other']
+ return engines["other"]
elif self._flushing or isinstance(clause, (Update, Delete)):
- return engines['leader']
+ return engines["leader"]
else:
- return engines[
- random.choice(['follower1','follower2'])
- ]
+ return engines[random.choice(["follower1", "follower2"])]
The above :class:`.Session` class is plugged in using the ``class_``
argument to :class:`.sessionmaker`::
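    Session = sessionmaker(class_=RoutingSession)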
transaction, like any other::
s = Session()
- objects = [
- User(name="u1"),
- User(name="u2"),
- User(name="u3")
- ]
+ objects = [User(name="u1"), User(name="u2"), User(name="u3")]
s.bulk_save_objects(objects)
For :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`,
dictionaries are passed::
- s.bulk_insert_mappings(User,
- [dict(name="u1"), dict(name="u2"), dict(name="u3")]
- )
+ s.bulk_insert_mappings(User, [dict(name="u1"), dict(name="u2"), dict(name="u3")])
.. seealso::
>>> user_table = Table(
... "user_account",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30)),
- ... Column('fullname', String)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30)),
+ ... Column("fullname", String),
... )
>>> from sqlalchemy import ForeignKey
>>> address_table = Table(
... "address",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', None, ForeignKey('user_account.id')),
- ... Column('email_address', String, nullable=False)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", None, ForeignKey("user_account.id")),
+ ... Column("email_address", String, nullable=False),
... )
>>> orders_table = Table(
... "user_order",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', None, ForeignKey('user_account.id')),
- ... Column('email_address', String, nullable=False)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", None, ForeignKey("user_account.id")),
+ ... Column("email_address", String, nullable=False),
... )
>>> order_items_table = Table(
... "order_items",
... metadata_obj,
... Column("order_id", ForeignKey("user_order.id"), primary_key=True),
- ... Column("item_id", ForeignKey("item.id"), primary_key=True)
+ ... Column("item_id", ForeignKey("item.id"), primary_key=True),
... )
>>> items_table = Table(
... "item",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String),
- ... Column('description', String)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String),
+ ... Column("description", String),
... )
>>> metadata_obj.create_all(engine)
BEGIN (implicit)
... orders = relationship("Order")
...
... def __repr__(self):
- ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
+ ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
>>> class Address(Base):
... __table__ = address_table
>>> conn = engine.connect()
>>> from sqlalchemy.orm import Session
>>> session = Session(conn)
- >>> session.add_all([
- ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[
- ... Address(email_address="spongebob@sqlalchemy.org")
- ... ]),
- ... User(name="sandy", fullname="Sandy Cheeks", addresses=[
- ... Address(email_address="sandy@sqlalchemy.org"),
- ... Address(email_address="squirrel@squirrelpower.org")
- ... ]),
- ... User(name="patrick", fullname="Patrick Star", addresses=[
- ... Address(email_address="pat999@aol.com")
- ... ]),
- ... User(name="squidward", fullname="Squidward Tentacles", addresses=[
- ... Address(email_address="stentcl@sqlalchemy.org")
- ... ]),
- ... User(name="ehkrabs", fullname="Eugene H. Krabs"),
- ... ])
+ >>> session.add_all(
+ ... [
+ ... User(
+ ... name="spongebob",
+ ... fullname="Spongebob Squarepants",
+ ... addresses=[Address(email_address="spongebob@sqlalchemy.org")],
+ ... ),
+ ... User(
+ ... name="sandy",
+ ... fullname="Sandy Cheeks",
+ ... addresses=[
+ ... Address(email_address="sandy@sqlalchemy.org"),
+ ... Address(email_address="squirrel@squirrelpower.org"),
+ ... ],
+ ... ),
+ ... User(
+ ... name="patrick",
+ ... fullname="Patrick Star",
+ ... addresses=[Address(email_address="pat999@aol.com")],
+ ... ),
+ ... User(
+ ... name="squidward",
+ ... fullname="Squidward Tentacles",
+ ... addresses=[Address(email_address="stentcl@sqlalchemy.org")],
+ ... ),
+ ... User(name="ehkrabs", fullname="Eugene H. Krabs"),
+ ... ]
+ ... )
>>> session.commit()
BEGIN ...
>>> conn.begin()
returns a :class:`_sql.Select` object::
>>> from sqlalchemy import select
- >>> stmt = select(User).where(User.name == 'spongebob')
+ >>> stmt = select(User).where(User.name == "spongebob")
To invoke a :class:`_sql.Select` with the ORM, it is passed to
:meth:`_orm.Session.execute`::
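    >>> result = session.execute(stmt)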
>>> stmt = select(User, Address).join(User.addresses).order_by(User.id, Address.id)
{sql}>>> for row in session.execute(stmt):
- ... print(f"{row.User.name} {row.Address.email_address}")
+ ... print(f"{row.User.name} {row.Address.email_address}")
SELECT user_account.id, user_account.name, user_account.fullname,
address.id AS id_1, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
as table columns are used::
{sql}>>> result = session.execute(
- ... select(User.name, Address.email_address).
- ... join(User.addresses).
- ... order_by(User.id, Address.id)
+ ... select(User.name, Address.email_address)
+ ... .join(User.addresses)
+ ... .order_by(User.id, Address.id)
... )
SELECT user_account.name, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
>>> from sqlalchemy.orm import Bundle
>>> stmt = select(
- ... Bundle("user", User.name, User.fullname),
- ... Bundle("email", Address.email_address)
+ ... Bundle("user", User.name, User.fullname), Bundle("email", Address.email_address)
... ).join_from(User, Address)
{sql}>>> for row in session.execute(stmt):
... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}")
>>> from sqlalchemy import union_all
>>> u = union_all(
- ... select(User).where(User.id < 2),
- ... select(User).where(User.id == 3)
+ ... select(User).where(User.id < 2), select(User).where(User.id == 3)
... ).order_by(User.id)
>>> stmt = select(User).from_statement(u)
>>> for user_obj in session.execute(stmt).scalars():
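    ...     print(user_obj)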
and order by criteria based on its exported columns::
>>> subq = union_all(
- ... select(User).where(User.id < 2),
- ... select(User).where(User.id == 3)
+ ... select(User).where(User.id < 2), select(User).where(User.id == 3)
... ).subquery()
>>> user_alias = aliased(User, subq)
>>> stmt = select(user_alias).order_by(user_alias.id)
relationship, it results in two separate JOIN elements, for a total of three
JOIN elements in the resulting SQL::
- >>> stmt = (
- ... select(User).
- ... join(User.orders).
- ... join(Order.items)
- ... )
+ >>> stmt = select(User).join(User.orders).join(Order.items)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
other elements to join FROM the ``User`` entity above, for example adding
on the ``User.addresses`` relationship to our chain of joins::
- >>> stmt = (
- ... select(User).
- ... join(User.orders).
- ... join(Order.items).
- ... join(User.addresses)
- ... )
+ >>> stmt = select(User).join(User.orders).join(Order.items).join(User.addresses)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
as the ON clause to be passed explicitly. An example that includes
a SQL expression as the ON clause is as follows::
- >>> stmt = select(User).join(Address, User.id==Address.user_id)
+ >>> stmt = select(User).join(Address, User.id == Address.user_id)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account JOIN address ON user_account.id = address.user_id
>>> a1 = aliased(Address)
>>> a2 = aliased(Address)
>>> stmt = (
- ... select(User).
- ... join(a1, User.addresses).
- ... join(a2, User.addresses).
- ... where(a1.email_address == 'ed@foo.com').
- ... where(a2.email_address == 'ed@bar.com')
+ ... select(User)
+ ... .join(a1, User.addresses)
+ ... .join(a2, User.addresses)
+ ... .where(a1.email_address == "ed@foo.com")
+ ... .where(a2.email_address == "ed@bar.com")
... )
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
this method would be::
>>> stmt = (
- ... select(User).
- ... join(User.addresses.of_type(a1)).
- ... join(User.addresses.of_type(a2)).
- ... where(a1.email_address == 'ed@foo.com').
- ... where(a2.email_address == 'ed@bar.com')
+ ... select(User)
+ ... .join(User.addresses.of_type(a1))
+ ... .join(User.addresses.of_type(a2))
+ ... .where(a1.email_address == "ed@foo.com")
+ ... .where(a2.email_address == "ed@bar.com")
... )
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
by ``AND``, the first one being the natural join along the foreign key,
and the second being a custom limiting criteria::
- >>> stmt = (
- ... select(User).
- ... join(User.addresses.and_(Address.email_address != 'foo@bar.com'))
- ... )
+ >>> stmt = select(User).join(User.addresses.and_(Address.email_address != "foo@bar.com"))
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
object using :meth:`_sql.Select.subquery`, which may then be used as the
target of the :meth:`_sql.Select.join` method::
- >>> subq = (
- ... select(Address).
- ... where(Address.email_address == 'pat999@aol.com').
- ... subquery()
- ... )
+ >>> subq = select(Address).where(Address.email_address == "pat999@aol.com").subquery()
>>> stmt = select(User).join(subq, User.id == subq.c.user_id)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
to it using :class:`_orm.aliased` refer to distinct sets of columns::
>>> user_address_subq = (
- ... select(User.id, User.name, Address.id, Address.email_address).
- ... join_from(User, Address).
- ... where(Address.email_address.in_(['pat999@aol.com', 'squirrel@squirrelpower.org'])).
- ... subquery()
+ ... select(User.id, User.name, Address.id, Address.email_address)
+ ... .join_from(User, Address)
+ ... .where(Address.email_address.in_(["pat999@aol.com", "squirrel@squirrelpower.org"]))
+ ... .subquery()
... )
>>> user_alias = aliased(User, user_address_subq, name="user")
>>> address_alias = aliased(Address, user_address_subq, name="address")
- >>> stmt = select(user_alias, address_alias).where(user_alias.name == 'sandy')
+ >>> stmt = select(user_alias, address_alias).where(user_alias.name == "sandy")
>>> for row in session.execute(stmt):
... print(f"{row.user} {row.address}")
{opensql}SELECT anon_1.id, anon_1.name, anon_1.id_1, anon_1.email_address
:class:`_sql.Select` is not in line with what we want to join from,
the :meth:`_sql.Select.join_from` method may be used::
- >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == 'sandy')
+ >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
in the form ``<join from>, <onclause>``, or ``<join from>, <join to>,
[<onclause>]``::
- >>> stmt = select(Address).join_from(User, Address).where(User.name == 'sandy')
+ >>> stmt = select(Address).join_from(User, Address).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
be used::
- >>> stmt = select(Address).select_from(User).join(Address).where(User.name == 'sandy')
+ >>> stmt = select(Address).select_from(User).join(Address).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
such a :class:`_sql.Join` object. Therefore we can see the contents
of :meth:`_sql.Select.select_from` being overridden in a case like this::
- >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == 'sandy')
+ >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM address JOIN user_account ON user_account.id = address.user_id
>>>
>>> j = address_table.join(user_table, user_table.c.id == address_table.c.user_id)
>>> stmt = (
- ... select(address_table).select_from(user_table).select_from(j).
- ... where(user_table.c.name == 'sandy')
+ ... select(address_table)
+ ... .select_from(user_table)
+ ... .select_from(j)
+ ... .where(user_table.c.name == "sandy")
... )
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
returned is a list of dictionaries::
>>> from pprint import pprint
- >>> user_alias = aliased(User, name='user2')
+ >>> user_alias = aliased(User, name="user2")
>>> stmt = select(User, User.id, user_alias)
>>> pprint(stmt.column_descriptions)
[{'aliased': False,
.. sourcecode:: pycon+sql
>>> stmt = (
- ... select(Address)
- ... .join(Address.user)
- ... .where(User.name == "sandy")
- ... .where(Address.email_address == "sandy@sqlalchemy.org")
+ ... select(Address)
+ ... .join(Address.user)
+ ... .where(User.name == "sandy")
+ ... .where(Address.email_address == "sandy@sqlalchemy.org")
... )
>>> sandy_address = session.scalars(stmt).one()
{opensql}SELECT address.id, address.email_address, address.user_id
[...] ('patrick',)
{stop}
- >>> patrick.addresses.append(
- ... Address(email_address="patrickstar@sqlalchemy.org")
- ... )
+ >>> patrick.addresses.append(Address(email_address="patrickstar@sqlalchemy.org"))
{opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id
FROM address
WHERE ? = address.user_id
Base = declarative_base()
+
class Entry(Base):
- __tablename__ = 'entry'
+ __tablename__ = "entry"
entry_id = Column(Integer, primary_key=True)
- widget_id = Column(Integer, ForeignKey('widget.widget_id'))
+ widget_id = Column(Integer, ForeignKey("widget.widget_id"))
name = Column(String(50))
+
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
widget_id = Column(Integer, primary_key=True)
- favorite_entry_id = Column(Integer,
- ForeignKey('entry.entry_id',
- name="fk_favorite_entry"))
+ favorite_entry_id = Column(
+ Integer, ForeignKey("entry.entry_id", name="fk_favorite_entry")
+ )
name = Column(String(50))
- entries = relationship(Entry, primaryjoin=
- widget_id==Entry.widget_id)
- favorite_entry = relationship(Entry,
- primaryjoin=
- favorite_entry_id==Entry.entry_id,
- post_update=True)
+ entries = relationship(Entry, primaryjoin=widget_id == Entry.widget_id)
+ favorite_entry = relationship(
+ Entry, primaryjoin=favorite_entry_id == Entry.entry_id, post_update=True
+ )
When a structure using the above configuration is flushed, the "widget" row will be
INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will
.. sourcecode:: pycon+sql
- >>> w1 = Widget(name='somewidget')
- >>> e1 = Entry(name='someentry')
+ >>> w1 = Widget(name="somewidget")
+ >>> e1 = Entry(name="someentry")
>>> w1.favorite_entry = e1
>>> w1.entries = [e1]
>>> session.add_all([w1, e1])
that also refers to this ``Widget``. We can use a composite foreign key,
as illustrated below::
- from sqlalchemy import Integer, ForeignKey, String, \
- Column, UniqueConstraint, ForeignKeyConstraint
+ from sqlalchemy import (
+ Integer,
+ ForeignKey,
+ String,
+ Column,
+ UniqueConstraint,
+ ForeignKeyConstraint,
+ )
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
+
class Entry(Base):
- __tablename__ = 'entry'
+ __tablename__ = "entry"
entry_id = Column(Integer, primary_key=True)
- widget_id = Column(Integer, ForeignKey('widget.widget_id'))
+ widget_id = Column(Integer, ForeignKey("widget.widget_id"))
name = Column(String(50))
- __table_args__ = (
- UniqueConstraint("entry_id", "widget_id"),
- )
+ __table_args__ = (UniqueConstraint("entry_id", "widget_id"),)
+
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
- widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True)
+ widget_id = Column(Integer, autoincrement="ignore_fk", primary_key=True)
favorite_entry_id = Column(Integer)
name = Column(String(50))
__table_args__ = (
ForeignKeyConstraint(
["widget_id", "favorite_entry_id"],
["entry.widget_id", "entry.entry_id"],
- name="fk_favorite_entry"
+ name="fk_favorite_entry",
),
)
- entries = relationship(Entry, primaryjoin=
- widget_id==Entry.widget_id,
- foreign_keys=Entry.widget_id)
- favorite_entry = relationship(Entry,
- primaryjoin=
- favorite_entry_id==Entry.entry_id,
- foreign_keys=favorite_entry_id,
- post_update=True)
+ entries = relationship(
+ Entry, primaryjoin=widget_id == Entry.widget_id, foreign_keys=Entry.widget_id
+ )
+ favorite_entry = relationship(
+ Entry,
+ primaryjoin=favorite_entry_id == Entry.entry_id,
+ foreign_keys=favorite_entry_id,
+ post_update=True,
+ )
The above mapping features a composite :class:`_schema.ForeignKeyConstraint`
bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure
illustrates this is::
class User(Base):
- __tablename__ = 'user'
- __table_args__ = {'mysql_engine': 'InnoDB'}
+ __tablename__ = "user"
+ __table_args__ = {"mysql_engine": "InnoDB"}
username = Column(String(50), primary_key=True)
fullname = Column(String(100))
class Address(Base):
- __tablename__ = 'address'
- __table_args__ = {'mysql_engine': 'InnoDB'}
+ __tablename__ = "address"
+ __table_args__ = {"mysql_engine": "InnoDB"}
email = Column(String(50), primary_key=True)
- username = Column(String(50),
- ForeignKey('user.username', onupdate="cascade")
- )
+ username = Column(String(50), ForeignKey("user.username", onupdate="cascade"))
Above, we illustrate ``onupdate="cascade"`` on the :class:`_schema.ForeignKey`
object, and we also illustrate the ``mysql_engine='InnoDB'`` setting
Our previous mapping using ``passive_updates=False`` looks like::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
username = Column(String(50), primary_key=True)
fullname = Column(String(100))
# does not implement ON UPDATE CASCADE
addresses = relationship("Address", passive_updates=False)
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
email = Column(String(50), primary_key=True)
- username = Column(String(50), ForeignKey('user.username'))
+ username = Column(String(50), ForeignKey("user.username"))
Key limitations of ``passive_updates=False`` include:
class called ``Node``, representing a tree structure::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
- parent_id = Column(Integer, ForeignKey('node.id'))
+ parent_id = Column(Integer, ForeignKey("node.id"))
data = Column(String(50))
children = relationship("Node")
that indicate those which should be considered to be "remote"::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
- parent_id = Column(Integer, ForeignKey('node.id'))
+ parent_id = Column(Integer, ForeignKey("node.id"))
data = Column(String(50))
parent = relationship("Node", remote_side=[id])
relationship using the :func:`.backref` function::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
- parent_id = Column(Integer, ForeignKey('node.id'))
+ parent_id = Column(Integer, ForeignKey("node.id"))
data = Column(String(50))
- children = relationship("Node",
- backref=backref('parent', remote_side=[id])
- )
+ children = relationship("Node", backref=backref("parent", remote_side=[id]))
There are several examples included with SQLAlchemy illustrating
self-referential strategies; these include :ref:`examples_adjacencylist` and
to a specific folder within that account::
class Folder(Base):
- __tablename__ = 'folder'
+ __tablename__ = "folder"
__table_args__ = (
- ForeignKeyConstraint(
- ['account_id', 'parent_id'],
- ['folder.account_id', 'folder.folder_id']),
+ ForeignKeyConstraint(
+ ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"]
+ ),
)
account_id = Column(Integer, primary_key=True)
folder_id = Column(Integer, primary_key=True)
parent_id = Column(Integer)
name = Column(String)
- parent_folder = relationship("Folder",
- backref="child_folders",
- remote_side=[account_id, folder_id]
- )
+ parent_folder = relationship(
+ "Folder", backref="child_folders", remote_side=[account_id, folder_id]
+ )
Above, we pass ``account_id`` into the :paramref:`_orm.relationship.remote_side` list.
:func:`_orm.relationship` recognizes that the ``account_id`` column here
Querying of self-referential structures works like any other query::
# get all nodes named 'child2'
- session.query(Node).filter(Node.data=='child2')
+ session.query(Node).filter(Node.data == "child2")
However, extra care is needed when attempting to join along
the foreign key from one level of the tree to the next. In SQL,
from sqlalchemy.orm import aliased
nodealias = aliased(Node)
- session.query(Node).filter(Node.data=='subchild1').\
- join(Node.parent.of_type(nodealias)).\
- filter(nodealias.data=="child2").\
- all()
+ session.query(Node).filter(Node.data == "subchild1").join(
+ Node.parent.of_type(nodealias)
+ ).filter(nodealias.data == "child2").all()
{opensql}SELECT node.id AS node_id,
node.parent_id AS node_parent_id,
node.data AS node_data
.. sourcecode:: python+sql
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
- parent_id = Column(Integer, ForeignKey('node.id'))
+ parent_id = Column(Integer, ForeignKey("node.id"))
data = Column(String(50))
- children = relationship("Node",
- lazy="joined",
- join_depth=2)
+ children = relationship("Node", lazy="joined", join_depth=2)
+
session.query(Node).all()
{opensql}SELECT node_1.id AS node_1_id,
# an Engine, which the Session will use for connection
# resources
- engine = create_engine('postgresql://scott:tiger@localhost/')
+ engine = create_engine("postgresql://scott:tiger@localhost/")
# create session and add objects
with Session(engine) as session:
# create session and add objects
with Session(engine) as session:
with session.begin():
- session.add(some_object)
- session.add(some_other_object)
+ session.add(some_object)
+ session.add(some_other_object)
# inner context calls session.commit(), if there were no exceptions
# outer context calls session.close()
# an Engine, which the Session will use for connection
# resources, typically in module scope
- engine = create_engine('postgresql://scott:tiger@localhost/')
+ engine = create_engine("postgresql://scott:tiger@localhost/")
# a sessionmaker(), also in the same scope as the engine
Session = sessionmaker(engine)
# an Engine, which the Session will use for connection
# resources
- engine = create_engine('postgresql://scott:tiger@localhost/')
+ engine = create_engine("postgresql://scott:tiger@localhost/")
# a sessionmaker(), also in the same scope as the engine
Session = sessionmaker(engine)
other ORM constructs such as an :func:`_orm.aliased` construct::
# query from a class
- results = session.query(User).filter_by(name='ed').all()
+ results = session.query(User).filter_by(name="ed").all()
# query with multiple classes, returns tuples
- results = session.query(User, Address).join('addresses').filter_by(name='ed').all()
+ results = session.query(User, Address).join("addresses").filter_by(name="ed").all()
# query using orm-columns, also returns tuples
results = session.query(User.name, User.fullname).all()
result = session.execute(statement).scalars().all()
# query with multiple classes
- statement = select(User, Address).join('addresses').filter_by(name='ed')
+ statement = select(User, Address).join("addresses").filter_by(name="ed")
# list of tuples
result = session.execute(statement).all()
(i.e. have been removed from a session) may be re-associated with a session
using this method::
- user1 = User(name='user1')
- user2 = User(name='user2')
+ user1 = User(name="user1")
+ user2 = User(name="user2")
session.add(user1)
session.add(user2)
- session.commit() # write changes to the database
+ session.commit() # write changes to the database
To add a list of items to the session at once, use
:meth:`~.Session.add_all`::
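    session.add_all([item1, item2, item3])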
To emit an ORM-enabled UPDATE in :term:`1.x style`, the :meth:`_query.Query.update` method
may be used::
- session.query(User).filter(User.name == "squidward").\
- update({"name": "spongebob"}, synchronize_session="fetch")
+ session.query(User).filter(User.name == "squidward").update(
+ {"name": "spongebob"}, synchronize_session="fetch"
+ )
Above, an UPDATE will be emitted against all rows that match the name
"squidward", updating them to the name "spongebob". The
from sqlalchemy import update
- stmt = update(User).where(User.name == "squidward").values(name="spongebob").\
- execution_options(synchronize_session="fetch")
+ stmt = (
+ update(User)
+ .where(User.name == "squidward")
+ .values(name="spongebob")
+ .execution_options(synchronize_session="fetch")
+ )
result = session.execute(stmt)
ORM-enabled delete, :term:`1.x style`::
- session.query(User).filter(User.name == "squidward").\
- delete(synchronize_session="fetch")
+ session.query(User).filter(User.name == "squidward").delete(synchronize_session="fetch")
ORM-enabled delete, :term:`2.0 style`::
from sqlalchemy import delete
- stmt = delete(User).where(User.name == "squidward").execution_options(synchronize_session="fetch")
+ stmt = (
+ delete(User)
+ .where(User.name == "squidward")
+ .execution_options(synchronize_session="fetch")
+ )
session.execute(stmt)
### this is the **wrong way to do it** ###
+
class ThingOne(object):
def go(self):
session = Session()
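        try:
            session.query(FooBar).update({"x": 5})
            session.commit()
        except: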
session.rollback()
raise
+
class ThingTwo(object):
def go(self):
session = Session()
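        try:
            session.query(Widget).update({"q": 18})
            session.commit()
        except: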
session.rollback()
raise
+
def run_my_program():
ThingOne().go()
ThingTwo().go()
### this is a **better** (but not the only) way to do it ###
+
class ThingOne(object):
def go(self, session):
session.query(FooBar).update({"x": 5})
+
class ThingTwo(object):
def go(self, session):
session.query(Widget).update({"q": 18})
+
def run_my_program():
with Session() as session:
with session.begin():
ThingOne().go(session)
ThingTwo().go(session)
-
.. versionchanged:: 1.4 The :class:`_orm.Session` may be used as a context
manager without the use of external helper functions.
The newer :ref:`core_inspection_toplevel` system can also be used::
from sqlalchemy import inspect
+
session = inspect(someobject).session
.. _session_faq_threadsafe:
Session = sessionmaker(engine, future=True)
+
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if orm_execute_state.is_select:
# ORDER BY if so
col_descriptions = orm_execute_state.statement.column_descriptions
- if col_descriptions[0]['entity'] is MyEntity:
+ if col_descriptions[0]["entity"] is MyEntity:
orm_execute_state.statement = orm_execute_state.statement.order_by(MyEntity.name)
The above example illustrates some simple modifications to SELECT statements.
Session = sessionmaker(engine, future=True)
+
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if (
- orm_execute_state.is_select and
- not orm_execute_state.is_column_load and
- not orm_execute_state.is_relationship_load
+ orm_execute_state.is_select
+ and not orm_execute_state.is_column_load
+ and not orm_execute_state.is_relationship_load
):
orm_execute_state.statement = orm_execute_state.statement.options(
with_loader_criteria(MyEntity.public == True)
import datetime
+
class HasTimestamp(object):
timestamp = Column(DateTime, default=datetime.datetime.now)
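
class SomeEntity(HasTimestamp, Base):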
__tablename__ = "some_entity"
id = Column(Integer, primary_key=True)
+
class SomeOtherEntity(HasTimestamp, Base):
__tablename__ = "some_entity"
id = Column(Integer, primary_key=True)
-
The above classes ``SomeEntity`` and ``SomeOtherEntity`` will each have a column
``timestamp`` that defaults to the current date and time. An event may be used
to intercept all objects that extend from ``HasTimestamp`` and filter their
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if (
- orm_execute_state.is_select
- and not orm_execute_state.is_column_load
- and not orm_execute_state.is_relationship_load
+ orm_execute_state.is_select
+ and not orm_execute_state.is_column_load
+ and not orm_execute_state.is_relationship_load
):
one_month_ago = datetime.datetime.today() - datetime.timedelta(days=30)  # timedelta has no "months" argument
with_loader_criteria(
HasTimestamp,
lambda cls: cls.timestamp >= one_month_ago,
- include_aliases=True
+ include_aliases=True,
)
)
cache = {}
+
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if "my_cache_key" in orm_execute_state.execution_options:
With the above hook in place, an example of using the cache would look like::
- stmt = select(User).where(User.name == 'sandy').execution_options(my_cache_key="key_sandy")
+ stmt = (
+ select(User).where(User.name == "sandy").execution_options(my_cache_key="key_sandy")
+ )
result = session.execute(stmt)
session = Session()
- @event.listens_for(session, 'transient_to_pending')
+
+ @event.listens_for(session, "transient_to_pending")
def object_is_pending(session, obj):
print("new pending: %s" % obj)
maker = sessionmaker()
- @event.listens_for(maker, 'transient_to_pending')
+
+ @event.listens_for(maker, "transient_to_pending")
def object_is_pending(session, obj):
print("new pending: %s" % obj)
Base = declarative_base()
+
@event.listens_for(Base, "init", propagate=True)
def intercept_init(instance, args, kwargs):
print("new transient: %s" % instance)
-
Transient to Pending
^^^^^^^^^^^^^^^^^^^^
def intercept_transient_to_pending(session, object_):
print("transient to pending: %s" % object_)
-
Pending to Persistent
^^^^^^^^^^^^^^^^^^^^^
def intercept_loaded_as_persistent(session, object_):
print("object loaded into persistent state: %s" % object_)
-
Persistent to Transient
^^^^^^^^^^^^^^^^^^^^^^^
def intercept_persistent_to_deleted(session, object_):
print("object was DELETEd, is now in deleted state: %s" % object_)
-
Deleted to Detached
^^^^^^^^^^^^^^^^^^^
def intercept_deleted_to_detached(session, object_):
print("deleted to detached: %s" % object_)
-
.. note::
While the object is in the deleted state, the :attr:`.InstanceState.deleted`
def intercept_detached_to_persistent(session, object_):
print("object became persistent again: %s" % object_)
-
Deleted to Persistent
^^^^^^^^^^^^^^^^^^^^^
from sqlalchemy import event
+
def strong_reference_session(session):
@event.listens_for(session, "pending_to_persistent")
@event.listens_for(session, "deleted_to_persistent")
@event.listens_for(session, "detached_to_persistent")
@event.listens_for(session, "loaded_as_persistent")
def strong_ref_object(sess, instance):
- if 'refs' not in sess.info:
- sess.info['refs'] = refs = set()
+ if "refs" not in sess.info:
+ sess.info["refs"] = refs = set()
else:
- refs = sess.info['refs']
+ refs = sess.info["refs"]
refs.add(instance)
-
@event.listens_for(session, "persistent_to_detached")
@event.listens_for(session, "persistent_to_deleted")
@event.listens_for(session, "persistent_to_transient")
def deref_object(sess, instance):
- sess.info['refs'].discard(instance)
+ sess.info["refs"].discard(instance)
Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`,
:meth:`.SessionEvents.detached_to_persistent`,
maker = sessionmaker()
strong_reference_session(maker)
-
.. _unitofwork_merging:
Merging
Let's use the canonical example of the User and Address objects::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
addresses = relationship("Address", backref="user")
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = Column(Integer, primary_key=True)
email_address = Column(String(50), nullable=False)
- user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
+ user_id = Column(Integer, ForeignKey("user.id"), nullable=False)
Assume a ``User`` object with one ``Address``, already persistent::
- >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')])
+ >>> u1 = User(name="ed", addresses=[Address(email_address="ed@ed.com")])
>>> session.add(u1)
>>> session.commit()
that is in the :term:`persistent` state. For example, if we load an object
as follows::
- user = session.query(User).filter_by(name='user1').first()
+ user = session.query(User).filter_by(name="user1").first()
The above ``User`` object is persistent, and has a series of attributes
present; if we were to look inside its ``__dict__``, we'd see that state
is that all un-flushed changes on an object are discarded. That is,
if we were to modify an attribute on our ``User``::
- >>> user.name = 'user2'
+ >>> user.name = "user2"
but then we call :meth:`~.Session.expire` without first calling :meth:`~.Session.flush`,
our pending value of ``'user2'`` is discarded::
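    >>> session.expire(user)
    >>> user.name  # reloads from the database; the pending 'user2' is gone
    'user1'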
attributes to be marked as expired::
# expire only attributes obj1.attr1, obj1.attr2
- session.expire(obj1, ['attr1', 'attr2'])
+ session.expire(obj1, ["attr1", "attr2"])
The :meth:`.Session.expire_all` method allows us to essentially call
:meth:`.Session.expire` on all objects contained within the :class:`.Session`
be that of a column-mapped attribute::
# reload obj1.attr1, obj1.attr2
- session.refresh(obj1, ['attr1', 'attr2'])
+ session.refresh(obj1, ["attr1", "attr2"])
.. tip::
Below, assume we start with a :class:`_orm.Session`::
from sqlalchemy.orm import Session
+
session = Session(engine)
We can now run operations within a demarcated transaction using a context
session.add(u1)
session.add(u2)
- nested = session.begin_nested() # establish a savepoint
+ nested = session.begin_nested() # establish a savepoint
session.add(u3)
nested.rollback() # rolls back u3, keeps u1 and u2
for record in records:
try:
with session.begin_nested():
- session.merge(record)
+ session.merge(record)
except:
- print("Skipped record %s" % record)
+ print("Skipped record %s" % record)
session.commit()
When the context manager yielded by :meth:`_orm.Session.begin_nested`
[
{"data": "some data one"},
{"data": "some data two"},
- {"data": "some data three"}
- ]
+ {"data": "some data three"},
+ ],
)
conn.commit()
Session = sessionmaker(engine, future=True)
with Session() as session:
- session.add_all([
- SomeClass(data="some data one"),
- SomeClass(data="some data two"),
- SomeClass(data="some data three")
- ])
+ session.add_all(
+ [
+ SomeClass(data="some data one"),
+ SomeClass(data="some data two"),
+ SomeClass(data="some data three"),
+ ]
+ )
session.commit()
Begin Once
[
{"data": "some data one"},
{"data": "some data two"},
- {"data": "some data three"}
- ]
+ {"data": "some data three"},
+ ],
)
# commits and closes automatically
Session = sessionmaker(engine, future=True)
with Session.begin() as session:
- session.add_all([
- SomeClass(data="some data one"),
- SomeClass(data="some data two"),
- SomeClass(data="some data three")
- ])
+ session.add_all(
+ [
+ SomeClass(data="some data one"),
+ SomeClass(data="some data two"),
+ SomeClass(data="some data three"),
+ ]
+ )
# commits and closes automatically
-
Nested Transaction
~~~~~~~~~~~~~~~~~~~~
[
{"data": "some data one"},
{"data": "some data two"},
- {"data": "some data three"}
- ]
+ {"data": "some data three"},
+ ],
)
savepoint.commit() # or rollback
with Session.begin() as session:
savepoint = session.begin_nested()
- session.add_all([
- SomeClass(data="some data one"),
- SomeClass(data="some data two"),
- SomeClass(data="some data three")
- ])
+ session.add_all(
+ [
+ SomeClass(data="some data one"),
+ SomeClass(data="some data two"),
+ SomeClass(data="some data three"),
+ ]
+ )
savepoint.commit() # or rollback
# commits automatically
-
-
-
.. _session_autocommit:
.. _session_explicit_begin:
try:
item1 = session.query(Item).get(1)
item2 = session.query(Item).get(2)
- item1.foo = 'bar'
- item2.bar = 'foo'
+ item1.foo = "bar"
+ item2.bar = "foo"
session.commit()
except:
session.rollback()
with session.begin():
item1 = session.query(Item).get(1)
item2 = session.query(Item).get(2)
- item1.foo = 'bar'
- item2.bar = 'foo'
+ item1.foo = "bar"
+ item2.bar = "foo"
The :meth:`_orm.Session.begin` method and the session's "autobegin" process
use the same sequence of steps to begin the transaction. This includes
import contextlib
+
@contextlib.contextmanager
def transaction(session):
if not session.in_transaction():
else:
yield
-
The above context manager may be used in the same way the
"subtransaction" flag works, such as in the following example::
with transaction(session):
method_b(session)
+
# method_b also starts a transaction, but when
# called from method_a participates in the ongoing
# transaction.
def method_b(session):
with transaction(session):
- session.add(SomeObject('bat', 'lala'))
+ session.add(SomeObject("bat", "lala"))
+
Session = sessionmaker(engine)
def method_a(session):
method_b(session)
+
def method_b(session):
- session.add(SomeObject('bat', 'lala'))
+ session.add(SomeObject("bat", "lala"))
+
Session = sessionmaker(engine)
interacting with transactions not managed by SQLAlchemy. To use two-phase
transactions, set the flag ``twophase=True`` on the session::
- engine1 = create_engine('postgresql://db1')
- engine2 = create_engine('postgresql://db2')
+ engine1 = create_engine("postgresql://db1")
+ engine2 = create_engine("postgresql://db2")
Session = sessionmaker(twophase=True)
# bind User operations to engine 1, Account operations to engine 2
- Session.configure(binds={User:engine1, Account:engine2})
+ Session.configure(binds={User: engine1, Account: engine2})
session = Session()
# before committing both transactions
session.commit()
-
.. _session_transaction_isolation:
Setting Transaction Isolation Levels / DBAPI AUTOCOMMIT
from sqlalchemy.orm import sessionmaker
eng = create_engine(
- "postgresql://scott:tiger@localhost/test",
- isolation_level='REPEATABLE READ'
+ "postgresql://scott:tiger@localhost/test", isolation_level="REPEATABLE READ"
)
Session = sessionmaker(eng)
-
Another option, useful if there are to be two engines with different isolation
levels at once, is to use the :meth:`_engine.Engine.execution_options` method,
which will produce a shallow copy of the original :class:`_engine.Engine` which
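A sketch of that call, deriving an "autocommit" copy from the engine created
above::

    # shallow copy of ``eng``; connections acquired from it are set to
    # AUTOCOMMIT, while ``eng`` itself is unaffected
    autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT")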
transactional_session = sessionmaker(eng)
autocommit_session = sessionmaker(autocommit_engine)
-
Above, both ``eng`` and ``autocommit_engine`` share the same dialect and
connection pool. However, the "AUTOCOMMIT" mode will be set upon connections
when they are acquired from the ``autocommit_engine``. The two
with Session() as session:
session.bind_mapper(User, autocommit_engine)
-
Setting Isolation for Individual Transactions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# call connection() with options before any other operations proceed.
# this will procure a new connection from the bound engine and begin a real
# database transaction.
- sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'})
+ sess.connection(execution_options={"isolation_level": "SERIALIZABLE"})
# ... work with session in SERIALIZABLE isolation level...
# call connection() with options before any other operations proceed.
# this will procure a new connection from the bound engine and begin a
# real database transaction.
- sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'})
+ sess.connection(execution_options={"isolation_level": "SERIALIZABLE"})
# ... work with session in SERIALIZABLE isolation level...
# outside the block, the transaction has been committed. the connection is
# released and reverted to its previous isolation level.
-
-
Tracking Transaction State with Events
--------------------------------------
# global application scope. create Session class, engine
Session = sessionmaker()
- engine = create_engine('postgresql://...')
+ engine = create_engine("postgresql://...")
+
class SomeTest(TestCase):
def setUp(self):
# begin a non-ORM transaction
self.trans = self.connection.begin()
-
# bind an individual Session to the connection
self.session = Session(bind=self.connection)
-
### optional ###
# if the database supports SAVEPOINT (SQLite needs special
A quick check to verify that we are on at least **version 1.4** of SQLAlchemy::
>>> import sqlalchemy
- >>> sqlalchemy.__version__ # doctest:+SKIP
+ >>> sqlalchemy.__version__ # doctest:+SKIP
1.4.0
Connecting
use :func:`~sqlalchemy.create_engine`::
>>> from sqlalchemy import create_engine
- >>> engine = create_engine('sqlite:///:memory:', echo=True)
+ >>> engine = create_engine("sqlite:///:memory:", echo=True)
The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is
accomplished via Python's standard ``logging`` module. With it enabled, we'll
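Since ``echo=True`` is shorthand for configuring the ``sqlalchemy.engine``
logger, an equivalent setup using ``logging`` directly might look like this
sketch::

    import logging

    # log all generated SQL at INFO level, as echo=True does
    logging.basicConfig()
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)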
>>> from sqlalchemy import Column, Integer, String
>>> class User(Base):
- ... __tablename__ = 'users'
+ ... __tablename__ = "users"
...
... id = Column(Integer, primary_key=True)
... name = Column(String)
... nickname = Column(String)
...
... def __repr__(self):
- ... return "<User(name='%s', fullname='%s', nickname='%s')>" % (
- ... self.name, self.fullname, self.nickname)
+ ... return "<User(name='%s', fullname='%s', nickname='%s')>" % (
+ ... self.name,
+ ... self.fullname,
+ ... self.nickname,
+ ... )
.. sidebar:: Tip
this information for a specific table is called the :class:`_schema.Table` object, and here Declarative has made
one for us. We can see this object by inspecting the ``__table__`` attribute::
- >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE
+ >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE
Table('users', MetaData(),
Column('id', Integer(), table=<users>, primary_key=True, nullable=False),
Column('name', String(), table=<users>),
without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct::
from sqlalchemy import Sequence
- Column(Integer, Sequence('user_id_seq'), primary_key=True)
+
+ Column(Integer, Sequence("user_id_seq"), primary_key=True)
A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our declarative
mapping is therefore::
class User(Base):
- __tablename__ = 'users'
- id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
+ __tablename__ = "users"
+ id = Column(Integer, Sequence("user_id_seq"), primary_key=True)
name = Column(String(50))
fullname = Column(String(50))
nickname = Column(String(50))
def __repr__(self):
return "<User(name='%s', fullname='%s', nickname='%s')>" % (
- self.name, self.fullname, self.nickname)
+ self.name,
+ self.fullname,
+ self.nickname,
+ )
We include this more verbose table definition separately
to highlight the difference between a minimal construct geared primarily
With mappings complete, let's now create and inspect a ``User`` object::
- >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname')
+ >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname")
>>> ed_user.name
'ed'
>>> ed_user.nickname
To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`::
- >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname')
+ >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname")
>>> session.add(ed_user)
At this point, we say that the instance is **pending**; no SQL has yet been issued
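The pending state can be verified with the runtime inspection API; a quick
sketch using :func:`_sa.inspect`::

    >>> from sqlalchemy import inspect
    >>> inspect(ed_user).pending
    True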
.. sourcecode:: python+sql
- {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE
+ {sql}>>> our_user = (
+ ... session.query(User).filter_by(name="ed").first()
+ ... ) # doctest:+NORMALIZE_WHITESPACE
BEGIN (implicit)
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
[...] ('ed', 'Ed Jones', 'edsnickname')
.. sourcecode:: python+sql
- >>> session.add_all([
- ... User(name='wendy', fullname='Wendy Williams', nickname='windy'),
- ... User(name='mary', fullname='Mary Contrary', nickname='mary'),
- ... User(name='fred', fullname='Fred Flintstone', nickname='freddy')])
+ >>> session.add_all(
+ ... [
+ ... User(name="wendy", fullname="Wendy Williams", nickname="windy"),
+ ... User(name="mary", fullname="Mary Contrary", nickname="mary"),
+ ... User(name="fred", fullname="Fred Flintstone", nickname="freddy"),
+ ... ]
+ ... )
Also, we've decided Ed's nickname isn't that great, so let's change it:
.. sourcecode:: python+sql
- >>> ed_user.nickname = 'eddie'
+ >>> ed_user.nickname = "eddie"
The :class:`~sqlalchemy.orm.session.Session` is paying attention. It knows,
for example, that ``Ed Jones`` has been modified:
.. sourcecode:: python+sql
- {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE
BEGIN (implicit)
SELECT users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- >>> ed_user.name = 'Edwardo'
+ >>> ed_user.name = "Edwardo"
and we'll add another erroneous user, ``fake_user``:
.. sourcecode:: python+sql
- >>> fake_user = User(name='fakeuser', fullname='Invalid', nickname='12345')
+ >>> fake_user = User(name="fakeuser", fullname="Invalid", nickname="12345")
>>> session.add(fake_user)
Querying the session, we can see that they're flushed into the current transaction:
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
+ {sql}>>> session.query(User).filter(User.name.in_(["Edwardo", "fakeuser"])).all()
UPDATE users SET name=? WHERE users.id = ?
[...] ('Edwardo', 1)
INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?)
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all()
+ {sql}>>> session.query(User).filter(User.name.in_(["ed", "fakeuser"])).all()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
{sql}>>> for row in session.query(User, User.name).all():
- ... print(row.User, row.name)
+ ... print(row.User, row.name)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
- {sql}>>> for row in session.query(User.name.label('name_label')).all():
- ... print(row.name_label)
+ {sql}>>> for row in session.query(User.name.label("name_label")).all():
+ ... print(row.name_label)
SELECT users.name AS name_label
FROM users
[...] (){stop}
.. sourcecode:: python+sql
>>> from sqlalchemy.orm import aliased
- >>> user_alias = aliased(User, name='user_alias')
+ >>> user_alias = aliased(User, name="user_alias")
{sql}>>> for row in session.query(user_alias, user_alias.name).all():
- ... print(row.user_alias)
+ ... print(row.user_alias)
SELECT user_alias.id AS user_alias_id,
user_alias.name AS user_alias_name,
user_alias.fullname AS user_alias_fullname,
.. sourcecode:: python+sql
{sql}>>> for u in session.query(User).order_by(User.id)[1:3]:
- ... print(u)
+ ... print(u)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
- {sql}>>> for name, in session.query(User.name).\
- ... filter_by(fullname='Ed Jones'):
- ... print(name)
+ {sql}>>> for (name,) in session.query(User.name).filter_by(fullname="Ed Jones"):
+ ... print(name)
SELECT users.name AS users_name FROM users
WHERE users.fullname = ?
[...] ('Ed Jones',)
.. sourcecode:: python+sql
- {sql}>>> for name, in session.query(User.name).\
- ... filter(User.fullname=='Ed Jones'):
- ... print(name)
+ {sql}>>> for (name,) in session.query(User.name).filter(User.fullname == "Ed Jones"):
+ ... print(name)
SELECT users.name AS users_name FROM users
WHERE users.fullname = ?
[...] ('Ed Jones',)
.. sourcecode:: python+sql
- {sql}>>> for user in session.query(User).\
- ... filter(User.name=='ed').\
- ... filter(User.fullname=='Ed Jones'):
- ... print(user)
+ {sql}>>> for user in (
+ ... session.query(User).filter(User.name == "ed").filter(User.fullname == "Ed Jones")
+ ... ):
+ ... print(user)
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
* :meth:`equals <.ColumnOperators.__eq__>`::
- query.filter(User.name == 'ed')
+ query.filter(User.name == "ed")
* :meth:`not equals <.ColumnOperators.__ne__>`::
- query.filter(User.name != 'ed')
+ query.filter(User.name != "ed")
* :meth:`LIKE <.ColumnOperators.like>`::
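    # a sketch against the User mapping above; renders LIKE with a bound
    # parameter for the pattern
    query.filter(User.name.like("%ed%"))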
* :meth:`IN <.ColumnOperators.in_>`::
- query.filter(User.name.in_(['ed', 'wendy', 'jack']))
+ query.filter(User.name.in_(["ed", "wendy", "jack"]))
# works with query objects too:
- query.filter(User.name.in_(
- session.query(User.name).filter(User.name.like('%ed%'))
- ))
+ query.filter(User.name.in_(session.query(User.name).filter(User.name.like("%ed%"))))
# use tuple_() for composite (multi-column) queries
from sqlalchemy import tuple_
+
query.filter(
- tuple_(User.name, User.nickname).\
- in_([('ed', 'edsnickname'), ('wendy', 'windy')])
+ tuple_(User.name, User.nickname).in_([("ed", "edsnickname"), ("wendy", "windy")])
)
* :meth:`NOT IN <.ColumnOperators.not_in>`::
- query.filter(~User.name.in_(['ed', 'wendy', 'jack']))
+ query.filter(~User.name.in_(["ed", "wendy", "jack"]))
* :meth:`IS NULL <.ColumnOperators.is_>`::
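    # a sketch against the User mapping above; ``== None`` is coerced to
    # the SQL IS NULL comparison
    query.filter(User.name == None)

    # alternatively, if pep8/linters are a concern
    query.filter(User.name.is_(None))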
.. sourcecode:: python+sql
- >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id)
+ >>> query = session.query(User).filter(User.name.like("%ed")).order_by(User.id)
{sql}>>> query.all()
SELECT users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- >>> query = session.query(User.id).filter(User.name == 'ed').\
- ... order_by(User.id)
+ >>> query = session.query(User.id).filter(User.name == "ed").order_by(User.id)
{sql}>>> query.scalar()
SELECT users.id AS users_id
FROM users
.. sourcecode:: python+sql
>>> from sqlalchemy import text
- {sql}>>> for user in session.query(User).\
- ... filter(text("id<224")).\
- ... order_by(text("id")).all():
+ {sql}>>> for user in session.query(User).filter(text("id<224")).order_by(text("id")).all():
... print(user.name)
SELECT users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(text("id<:value and name=:name")).\
- ... params(value=224, name='fred').order_by(User.id).one()
+ {sql}>>> session.query(User).filter(text("id<:value and name=:name")).params(
+ ... value=224, name="fred"
+ ... ).order_by(User.id).one()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
- {sql}>>> session.query(User).from_statement(
- ... text("SELECT * FROM users where name=:name")).params(name='ed').all()
+ {sql}>>> session.query(User).from_statement(text("SELECT * FROM users where name=:name")).params(
+ ... name="ed"
+ ... ).all()
SELECT * FROM users where name=?
[...] ('ed',)
{stop}[<User(name='ed', fullname='Ed Jones', nickname='eddie')>]
.. sourcecode:: python+sql
- >>> stmt = text("SELECT name, id, fullname, nickname "
- ... "FROM users where name=:name")
+ >>> stmt = text("SELECT name, id, fullname, nickname " "FROM users where name=:name")
>>> stmt = stmt.columns(User.name, User.id, User.fullname, User.nickname)
- {sql}>>> session.query(User).from_statement(stmt).params(name='ed').all()
+ {sql}>>> session.query(User).from_statement(stmt).params(name="ed").all()
SELECT name, id, fullname, nickname FROM users where name=?
[...] ('ed',)
{stop}[<User(name='ed', fullname='Ed Jones', nickname='eddie')>]
>>> stmt = text("SELECT name, id FROM users where name=:name")
>>> stmt = stmt.columns(User.name, User.id)
- {sql}>>> session.query(User.id, User.name).\
- ... from_statement(stmt).params(name='ed').all()
+ {sql}>>> session.query(User.id, User.name).from_statement(stmt).params(name="ed").all()
SELECT name, id FROM users where name=?
[...] ('ed',)
{stop}[(1, u'ed')]
.. sourcecode:: python+sql
- {sql}>>> session.query(User).filter(User.name.like('%ed')).count()
+ {sql}>>> session.query(User).filter(User.name.like("%ed")).count()
SELECT count(*) AS count_1
FROM (SELECT users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- {sql}>>> session.query(func.count('*')).select_from(User).scalar()
+ {sql}>>> session.query(func.count("*")).select_from(User).scalar()
SELECT count(?) AS count_1
FROM users
[...] ('*',)
>>> from sqlalchemy.orm import relationship
>>> class Address(Base):
- ... __tablename__ = 'addresses'
+ ... __tablename__ = "addresses"
... id = Column(Integer, primary_key=True)
... email_address = Column(String, nullable=False)
- ... user_id = Column(Integer, ForeignKey('users.id'))
+ ... user_id = Column(Integer, ForeignKey("users.id"))
...
... user = relationship("User", back_populates="addresses")
...
... def __repr__(self):
... return "<Address(email_address='%s')>" % self.email_address
- >>> User.addresses = relationship(
- ... "Address", order_by=Address.id, back_populates="user")
+ >>> User.addresses = relationship("Address", order_by=Address.id, back_populates="user")
The above class introduces the :class:`_schema.ForeignKey` construct, which is a
directive applied to :class:`_schema.Column` that indicates that values in this
.. sourcecode:: python+sql
- >>> jack = User(name='jack', fullname='Jack Bean', nickname='gjffdd')
+ >>> jack = User(name="jack", fullname="Jack Bean", nickname="gjffdd")
>>> jack.addresses
[]
.. sourcecode:: python+sql
>>> jack.addresses = [
- ... Address(email_address='jack@google.com'),
- ... Address(email_address='j25@yahoo.com')]
+ ... Address(email_address="jack@google.com"),
+ ... Address(email_address="j25@yahoo.com"),
+ ... ]
When using a bidirectional relationship, elements added in one direction
automatically become visible in the other direction. This behavior occurs
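A short sketch of that visibility, assuming the ``jack`` object above (the
new email address is hypothetical; no flush is needed for the in-Python
linkage)::

    >>> a1 = Address(email_address="jack@tucker.com")
    >>> a1.user = jack
    >>> a1 in jack.addresses
    True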
.. sourcecode:: python+sql
- {sql}>>> jack = session.query(User).\
- ... filter_by(name='jack').one()
+ {sql}>>> jack = session.query(User).filter_by(name="jack").one()
BEGIN (implicit)
SELECT users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- {sql}>>> for u, a in session.query(User, Address).\
- ... filter(User.id==Address.user_id).\
- ... filter(Address.email_address=='jack@google.com').\
- ... all():
+ {sql}>>> for u, a in (
+ ... session.query(User, Address)
+ ... .filter(User.id == Address.user_id)
+ ... .filter(Address.email_address == "jack@google.com")
+ ... .all()
+ ... ):
... print(u)
... print(a)
SELECT users.id AS users_id,
.. sourcecode:: python+sql
- {sql}>>> session.query(User).join(Address).\
- ... filter(Address.email_address=='jack@google.com').\
- ... all()
+ {sql}>>> session.query(User).join(Address).filter(
+ ... Address.email_address == "jack@google.com"
+ ... ).all()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
were no foreign keys, or several, :meth:`_query.Query.join`
works better when one of the following forms is used::
- query.join(Address, User.id==Address.user_id) # explicit condition
- query.join(User.addresses) # specify relationship from left to right
- query.join(Address, User.addresses) # same, with explicit target
- query.join(User.addresses.and_(Address.name != 'foo')) # use relationship + additional ON criteria
+ query.join(Address, User.id == Address.user_id) # explicit condition
+ query.join(User.addresses) # specify relationship from left to right
+ query.join(Address, User.addresses) # same, with explicit target
+ query.join(
+     User.addresses.and_(Address.email_address != "foo")
+ ) # use relationship + additional ON criteria
As you would expect, the same idea is used for "outer" joins, using the
:meth:`_query.Query.outerjoin` function::
- query.outerjoin(User.addresses) # LEFT OUTER JOIN
+ query.outerjoin(User.addresses) # LEFT OUTER JOIN
The reference documentation for :meth:`_query.Query.join` contains detailed information
and examples of the calling styles accepted by this method; :meth:`_query.Query.join`
query = session.query(User, Address).select_from(Address).join(User)
-
.. _ormtutorial_aliases:
Using Aliases
>>> from sqlalchemy.orm import aliased
>>> adalias1 = aliased(Address)
>>> adalias2 = aliased(Address)
- {sql}>>> for username, email1, email2 in \
- ... session.query(User.name, adalias1.email_address, adalias2.email_address).\
- ... join(User.addresses.of_type(adalias1)).\
- ... join(User.addresses.of_type(adalias2)).\
- ... filter(adalias1.email_address=='jack@google.com').\
- ... filter(adalias2.email_address=='j25@yahoo.com'):
+ {sql}>>> for username, email1, email2 in (
+ ... session.query(User.name, adalias1.email_address, adalias2.email_address)
+ ... .join(User.addresses.of_type(adalias1))
+ ... .join(User.addresses.of_type(adalias2))
+ ... .filter(adalias1.email_address == "jack@google.com")
+ ... .filter(adalias2.email_address == "j25@yahoo.com")
+ ... ):
... print(username, email1, email2)
SELECT users.name AS users_name,
addresses_1.email_address AS addresses_1_email_address,
construct, which are described in :ref:`sqlexpression_toplevel`::
>>> from sqlalchemy.sql import func
- >>> stmt = session.query(Address.user_id, func.count('*').\
- ... label('address_count')).\
- ... group_by(Address.user_id).subquery()
+ >>> stmt = (
+ ... session.query(Address.user_id, func.count("*").label("address_count"))
+ ... .group_by(Address.user_id)
+ ... .subquery()
+ ... )
The ``func`` keyword generates SQL functions, and the ``subquery()`` method on
:class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct
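As a quick sketch of the ``func`` behavior, any attribute of ``func``
generates the corresponding SQL function when compiled::

    >>> from sqlalchemy import func
    >>> print(func.count(Address.id))
    count(addresses.id)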
.. sourcecode:: python+sql
- {sql}>>> for u, count in session.query(User, stmt.c.address_count).\
- ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id):
+ {sql}>>> for u, count in (
+ ... session.query(User, stmt.c.address_count)
+ ... .outerjoin(stmt, User.id == stmt.c.user_id)
+ ... .order_by(User.id)
+ ... ):
... print(u, count)
SELECT users.id AS users_id,
users.name AS users_name,
.. sourcecode:: python+sql
- {sql}>>> stmt = session.query(Address).\
- ... filter(Address.email_address != 'j25@yahoo.com').\
- ... subquery()
+ {sql}>>> stmt = (
+ ... session.query(Address).filter(Address.email_address != "j25@yahoo.com").subquery()
+ ... )
>>> addr_alias = aliased(Address, stmt)
- >>> for user, address in session.query(User, addr_alias).\
- ... join(addr_alias, User.addresses):
+ >>> for user, address in session.query(User, addr_alias).join(addr_alias, User.addresses):
... print(user)
... print(address)
SELECT users.id AS users_id,
.. sourcecode:: python+sql
>>> from sqlalchemy.sql import exists
- >>> stmt = exists().where(Address.user_id==User.id)
- {sql}>>> for name, in session.query(User.name).filter(stmt):
+ >>> stmt = exists().where(Address.user_id == User.id)
+ {sql}>>> for (name,) in session.query(User.name).filter(stmt):
... print(name)
SELECT users.name AS users_name
FROM users
.. sourcecode:: python+sql
- {sql}>>> for name, in session.query(User.name).\
- ... filter(User.addresses.any()):
+ {sql}>>> for (name,) in session.query(User.name).filter(User.addresses.any()):
... print(name)
SELECT users.name AS users_name
FROM users
.. sourcecode:: python+sql
- {sql}>>> for name, in session.query(User.name).\
- ... filter(User.addresses.any(Address.email_address.like('%google%'))):
+ {sql}>>> for (name,) in session.query(User.name).filter(
+ ... User.addresses.any(Address.email_address.like("%google%"))
+ ... ):
... print(name)
SELECT users.name AS users_name
FROM users
.. sourcecode:: python+sql
- {sql}>>> session.query(Address).\
- ... filter(~Address.user.has(User.name=='jack')).all()
+ {sql}>>> session.query(Address).filter(~Address.user.has(User.name == "jack")).all()
SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
addresses.user_id AS addresses_user_id
* :meth:`~.RelationshipProperty.Comparator.any` (used for collections)::
- query.filter(User.addresses.any(Address.email_address == 'bar'))
+ query.filter(User.addresses.any(Address.email_address == "bar"))
# also takes keyword arguments:
- query.filter(User.addresses.any(email_address='bar'))
+ query.filter(User.addresses.any(email_address="bar"))
* :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references)::
- query.filter(Address.user.has(name='ed'))
+ query.filter(Address.user.has(name="ed"))
* :meth:`_query.Query.with_parent` (used for any relationship)::
- session.query(Address).with_parent(someuser, 'addresses')
+ session.query(Address).with_parent(someuser, "addresses")
Eager Loading
=============
.. sourcecode:: python+sql
>>> from sqlalchemy.orm import selectinload
- {sql}>>> jack = session.query(User).\
- ... options(selectinload(User.addresses)).\
- ... filter_by(name='jack').one()
+ {sql}>>> jack = (
+ ... session.query(User)
+ ... .options(selectinload(User.addresses))
+ ... .filter_by(name="jack")
+ ... .one()
+ ... )
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
>>> from sqlalchemy.orm import joinedload
- {sql}>>> jack = session.query(User).\
- ... options(joinedload(User.addresses)).\
- ... filter_by(name='jack').one()
+ {sql}>>> jack = (
+ ... session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").one()
+ ... )
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
>>> from sqlalchemy.orm import contains_eager
- {sql}>>> jacks_addresses = session.query(Address).\
- ... join(Address.user).\
- ... filter(User.name=='jack').\
- ... options(contains_eager(Address.user)).\
- ... all()
+ {sql}>>> jacks_addresses = (
+ ... session.query(Address)
+ ... .join(Address.user)
+ ... .filter(User.name == "jack")
+ ... .options(contains_eager(Address.user))
+ ... .all()
+ ... )
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
>>> session.delete(jack)
- {sql}>>> session.query(User).filter_by(name='jack').count()
+ {sql}>>> session.query(User).filter_by(name="jack").count()
UPDATE addresses SET user_id=? WHERE addresses.id = ?
[...] ((None, 1), (None, 2))
DELETE FROM users WHERE users.id = ?
.. sourcecode:: python+sql
{sql}>>> session.query(Address).filter(
- ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
- ... ).count()
+ ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"])
+ ... ).count()
SELECT count(*) AS count_1
FROM (SELECT addresses.id AS addresses_id,
addresses.email_address AS addresses_email_address,
including the cascade configuration (we'll leave the constructor out too)::
>>> class User(Base):
- ... __tablename__ = 'users'
+ ... __tablename__ = "users"
...
... id = Column(Integer, primary_key=True)
... name = Column(String)
... fullname = Column(String)
... nickname = Column(String)
...
- ... addresses = relationship("Address", back_populates='user',
- ... cascade="all, delete, delete-orphan")
+ ... addresses = relationship(
+ ... "Address", back_populates="user", cascade="all, delete, delete-orphan"
+ ... )
...
... def __repr__(self):
- ... return "<User(name='%s', fullname='%s', nickname='%s')>" % (
- ... self.name, self.fullname, self.nickname)
+ ... return "<User(name='%s', fullname='%s', nickname='%s')>" % (
+ ... self.name,
+ ... self.fullname,
+ ... self.nickname,
+ ... )
Then we recreate ``Address``, noting that in this case we've created
the ``Address.user`` relationship via the ``User`` class already::
>>> class Address(Base):
- ... __tablename__ = 'addresses'
+ ... __tablename__ = "addresses"
... id = Column(Integer, primary_key=True)
... email_address = Column(String, nullable=False)
- ... user_id = Column(Integer, ForeignKey('users.id'))
+ ... user_id = Column(Integer, ForeignKey("users.id"))
... user = relationship("User", back_populates="addresses")
...
... def __repr__(self):
# only one address remains
{sql}>>> session.query(Address).filter(
- ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
+ ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"])
... ).count()
DELETE FROM addresses WHERE addresses.id = ?
[...] (2,)
>>> session.delete(jack)
- {sql}>>> session.query(User).filter_by(name='jack').count()
+ {sql}>>> session.query(User).filter_by(name="jack").count()
DELETE FROM addresses WHERE addresses.id = ?
[...] (1,)
DELETE FROM users WHERE users.id = ?
{stop}0
{sql}>>> session.query(Address).filter(
- ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
+ ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"])
... ).count()
SELECT count(*) AS count_1
FROM (SELECT addresses.id AS addresses_id,
>>> from sqlalchemy import Table, Text
>>> # association table
- >>> post_keywords = Table('post_keywords', Base.metadata,
- ... Column('post_id', ForeignKey('posts.id'), primary_key=True),
- ... Column('keyword_id', ForeignKey('keywords.id'), primary_key=True)
+ >>> post_keywords = Table(
+ ... "post_keywords",
+ ... Base.metadata,
+ ... Column("post_id", ForeignKey("posts.id"), primary_key=True),
+ ... Column("keyword_id", ForeignKey("keywords.id"), primary_key=True),
... )
Above, we can see declaring a :class:`_schema.Table` directly is a little different
table as an association table::
>>> class BlogPost(Base):
- ... __tablename__ = 'posts'
+ ... __tablename__ = "posts"
...
... id = Column(Integer, primary_key=True)
- ... user_id = Column(Integer, ForeignKey('users.id'))
+ ... user_id = Column(Integer, ForeignKey("users.id"))
... headline = Column(String(255), nullable=False)
... body = Column(Text)
...
... # many to many BlogPost<->Keyword
- ... keywords = relationship('Keyword',
- ... secondary=post_keywords,
- ... back_populates='posts')
+ ... keywords = relationship("Keyword", secondary=post_keywords, back_populates="posts")
...
... def __init__(self, headline, body, author):
... self.author = author
>>> class Keyword(Base):
- ... __tablename__ = 'keywords'
+ ... __tablename__ = "keywords"
...
... id = Column(Integer, primary_key=True)
... keyword = Column(String(50), nullable=False, unique=True)
- ... posts = relationship('BlogPost',
- ... secondary=post_keywords,
- ... back_populates='keywords')
+ ... posts = relationship("BlogPost", secondary=post_keywords, back_populates="keywords")
...
... def __init__(self, keyword):
... self.keyword = keyword
.. sourcecode:: python+sql
- {sql}>>> wendy = session.query(User).\
- ... filter_by(name='wendy').\
- ... one()
+ {sql}>>> wendy = session.query(User).filter_by(name="wendy").one()
SELECT users.id AS users_id,
users.name AS users_name,
users.fullname AS users_fullname,
.. sourcecode:: python+sql
- >>> post.keywords.append(Keyword('wendy'))
- >>> post.keywords.append(Keyword('firstpost'))
+ >>> post.keywords.append(Keyword("wendy"))
+ >>> post.keywords.append(Keyword("firstpost"))
We can now look up all blog posts with the keyword 'firstpost'. We'll use the
``any`` operator to locate "blog posts where any of its keywords has the
.. sourcecode:: python+sql
- {sql}>>> session.query(BlogPost).\
- ... filter(BlogPost.keywords.any(keyword='firstpost')).\
- ... all()
+ {sql}>>> session.query(BlogPost).filter(BlogPost.keywords.any(keyword="firstpost")).all()
INSERT INTO keywords (keyword) VALUES (?)
[...] ('wendy',)
INSERT INTO keywords (keyword) VALUES (?)
.. sourcecode:: python+sql
- {sql}>>> session.query(BlogPost).\
- ... filter(BlogPost.author==wendy).\
- ... filter(BlogPost.keywords.any(keyword='firstpost')).\
- ... all()
+ {sql}>>> session.query(BlogPost).filter(BlogPost.author == wendy).filter(
+ ... BlogPost.keywords.any(keyword="firstpost")
+ ... ).all()
SELECT posts.id AS posts_id,
posts.user_id AS posts_user_id,
posts.headline AS posts_headline,
.. sourcecode:: python+sql
- {sql}>>> wendy.posts.\
- ... filter(BlogPost.keywords.any(keyword='firstpost')).\
- ... all()
+ {sql}>>> wendy.posts.filter(BlogPost.keywords.any(keyword="firstpost")).all()
SELECT posts.id AS posts_id,
posts.user_id AS posts_user_id,
posts.headline AS posts_headline,
mapper options::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
version_id = Column(Integer, nullable=False)
name = Column(String(50), nullable=False)
- __mapper_args__ = {
- "version_id_col": version_id
- }
+ __mapper_args__ = {"version_id_col": version_id}
.. note:: It is **strongly recommended** that the ``version_id`` column
be made NOT NULL. The versioning feature **does not support** a NULL
import uuid
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
version_uuid = Column(String(32), nullable=False)
name = Column(String(50), nullable=False)
__mapper_args__ = {
- 'version_id_col':version_uuid,
- 'version_id_generator':lambda version: uuid.uuid4().hex
+ "version_id_col": version_uuid,
+ "version_id_generator": lambda version: uuid.uuid4().hex,
}
The persistence engine will call upon ``uuid.uuid4()`` each time a
from sqlalchemy import FetchedValue
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
xmin = Column("xmin", String, system=True, server_default=FetchedValue())
- __mapper_args__ = {
- 'version_id_col': xmin,
- 'version_id_generator': False
- }
+ __mapper_args__ = {"version_id_col": xmin, "version_id_generator": False}
With the above mapping, the ORM will rely upon the ``xmin`` column for
automatically providing the new value of the version id counter.
import uuid
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
version_uuid = Column(String(32), nullable=False)
name = Column(String(50), nullable=False)
- __mapper_args__ = {
- 'version_id_col':version_uuid,
- 'version_id_generator': False
- }
+ __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False}
+
- u1 = User(name='u1', version_uuid=uuid.uuid4())
+ u1 = User(name="u1", version_uuid=uuid.uuid4())
session.add(u1)
session.commit()
- u1.name = 'u2'
+ u1.name = "u2"
u1.version_uuid = uuid.uuid4()
session.commit()
issues::
# will leave version_uuid unchanged
- u1.name = 'u3'
+ u1.name = "u3"
session.commit()
.. versionadded:: 0.9.0
and the VALUES clause at once::
>>> from sqlalchemy import insert
- >>> stmt = insert(user_table).values(name='spongebob', fullname="Spongebob Squarepants")
+ >>> stmt = insert(user_table).values(name="spongebob", fullname="Spongebob Squarepants")
The above ``stmt`` variable is an instance of :class:`_sql.Insert`. Most
SQL expressions can be stringified in place as a means to see the general
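Printing the construct illustrates this general form (a sketch; the bound
parameter names follow the column names)::

    >>> print(stmt)
    INSERT INTO user_account (name, fullname) VALUES (:name, :fullname)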
... insert(user_table),
... [
... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"}
- ... ]
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... ],
... )
... conn.commit()
{opensql}BEGIN (implicit)
>>> from sqlalchemy import select, bindparam
>>> scalar_subq = (
- ... select(user_table.c.id).
- ... where(user_table.c.name==bindparam('username')).
- ... scalar_subquery()
+ ... select(user_table.c.id)
+ ... .where(user_table.c.name == bindparam("username"))
+ ... .scalar_subquery()
... )
>>> with engine.connect() as conn:
... result = conn.execute(
... insert(address_table).values(user_id=scalar_subq),
... [
- ... {"username": 'spongebob', "email_address": "spongebob@sqlalchemy.org"},
- ... {"username": 'sandy', "email_address": "sandy@sqlalchemy.org"},
- ... {"username": 'sandy', "email_address": "sandy@squirrelpower.org"},
- ... ]
+ ... {"username": "spongebob", "email_address": "spongebob@sqlalchemy.org"},
+ ... {"username": "sandy", "email_address": "sandy@sqlalchemy.org"},
+ ... {"username": "sandy", "email_address": "sandy@squirrelpower.org"},
+ ... ],
... )
... conn.commit()
{opensql}BEGIN (implicit)
object that's returned when the statement is executed has rows which
can be fetched::
- >>> insert_stmt = insert(address_table).returning(address_table.c.id, address_table.c.email_address)
+ >>> insert_stmt = insert(address_table).returning(
+ ... address_table.c.id, address_table.c.email_address
+ ... )
>>> print(insert_stmt)
{opensql}INSERT INTO address (id, user_id, email_address)
VALUES (:id, :user_id, :email_address)
it can be stringified in place::
>>> from sqlalchemy import select
- >>> stmt = select(user_table).where(user_table.c.name == 'spongebob')
+ >>> stmt = select(user_table).where(user_table.c.name == "spongebob")
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
.. sourcecode:: pycon+sql
- >>> stmt = select(User).where(User.name == 'spongebob')
+ >>> stmt = select(User).where(User.name == "spongebob")
>>> with Session(engine) as session:
... for row in session.execute(stmt):
... print(row)
it with full ``Address`` entities in the second element::
>>> session.execute(
- ... select(User.name, Address).
- ... where(User.id==Address.user_id).
- ... order_by(Address.id)
+ ... select(User.name, Address).where(User.id == Address.user_id).order_by(Address.id)
... ).all()
{opensql}SELECT user_account.name, address.id, address.email_address, address.user_id
FROM user_account, address
.. sourcecode:: pycon+sql
>>> from sqlalchemy import func, cast
- >>> stmt = (
- ... select(
- ... ("Username: " + user_table.c.name).label("username"),
- ... ).order_by(user_table.c.name)
- ... )
+ >>> stmt = select(
+ ... ("Username: " + user_table.c.name).label("username"),
+ ... ).order_by(user_table.c.name)
>>> with engine.connect() as conn:
... for row in conn.execute(stmt):
... print(f"{row.username}")
SELECT statement::
>>> from sqlalchemy import text
- >>> stmt = (
- ... select(
- ... text("'some phrase'"), user_table.c.name
- ... ).order_by(user_table.c.name)
- ... )
+ >>> stmt = select(text("'some phrase'"), user_table.c.name).order_by(user_table.c.name)
>>> with engine.connect() as conn:
... print(conn.execute(stmt).all())
{opensql}BEGIN (implicit)
>>> from sqlalchemy import literal_column
- >>> stmt = (
- ... select(
- ... literal_column("'some phrase'").label("p"), user_table.c.name
- ... ).order_by(user_table.c.name)
+ >>> stmt = select(literal_column("'some phrase'").label("p"), user_table.c.name).order_by(
+ ... user_table.c.name
... )
>>> with engine.connect() as conn:
... for row in conn.execute(stmt):
Python operators such as ``==``, ``!=``, ``<``, ``>=`` etc. generate new
SQL Expression objects, rather than plain boolean ``True``/``False`` values::
- >>> print(user_table.c.name == 'squidward')
+ >>> print(user_table.c.name == "squidward")
user_account.name = :name_1
>>> print(address_table.c.user_id > 10)
We can use expressions like these to generate the WHERE clause by passing
the resulting objects to the :meth:`_sql.Select.where` method::
- >>> print(select(user_table).where(user_table.c.name == 'squidward'))
+ >>> print(select(user_table).where(user_table.c.name == "squidward"))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
WHERE user_account.name = :name_1
method may be invoked any number of times::
>>> print(
- ... select(address_table.c.email_address).
- ... where(user_table.c.name == 'squidward').
- ... where(address_table.c.user_id == user_table.c.id)
+ ... select(address_table.c.email_address)
+ ... .where(user_table.c.name == "squidward")
+ ... .where(address_table.c.user_id == user_table.c.id)
... )
{opensql}SELECT address.email_address
FROM address, user_account
with the same effect::
>>> print(
- ... select(address_table.c.email_address).
- ... where(
- ... user_table.c.name == 'squidward',
- ... address_table.c.user_id == user_table.c.id
+ ... select(address_table.c.email_address).where(
+ ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id
... )
... )
{opensql}SELECT address.email_address
>>> from sqlalchemy import and_, or_
>>> print(
- ... select(Address.email_address).
- ... where(
+ ... select(Address.email_address).where(
... and_(
- ... or_(User.name == 'squidward', User.name == 'sandy'),
- ... Address.user_id == User.id
+ ... or_(User.name == "squidward", User.name == "sandy"),
+ ... Address.user_id == User.id,
... )
... )
... )
arguments that match column keys or ORM attribute names. It will filter
against the leftmost FROM clause or the last entity joined::
- >>> print(
- ... select(User).filter_by(name='spongebob', fullname='Spongebob Squarepants')
- ... )
+ >>> print(select(User).filter_by(name="spongebob", fullname="Spongebob Squarepants"))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
WHERE user_account.name = :name_1 AND user_account.fullname = :fullname_1
explicitly::
>>> print(
- ... select(user_table.c.name, address_table.c.email_address).
- ... join_from(user_table, address_table)
+ ... select(user_table.c.name, address_table.c.email_address).join_from(
+ ... user_table, address_table
+ ... )
... )
{opensql}SELECT user_account.name, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
The other is the :meth:`_sql.Select.join` method, which indicates only the
right side of the JOIN; the left-hand side is inferred::
- >>> print(
- ... select(user_table.c.name, address_table.c.email_address).
- ... join(address_table)
- ... )
+ >>> print(select(user_table.c.name, address_table.c.email_address).join(address_table))
{opensql}SELECT user_account.name, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
clause and :meth:`_sql.Select.join` to establish ``address_table`` as
the second::
- >>> print(
- ... select(address_table.c.email_address).
- ... select_from(user_table).join(address_table)
- ... )
+ >>> print(select(address_table.c.email_address).select_from(user_table).join(address_table))
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
produce the SQL ``count()`` function::
>>> from sqlalchemy import func
- >>> print (
- ... select(func.count('*')).select_from(user_table)
- ... )
+ >>> print(select(func.count("*")).select_from(user_table))
{opensql}SELECT count(:count_2) AS count_1
FROM user_account
same SQL Expression mechanics as we saw in :ref:`tutorial_select_where_clause`::
>>> print(
- ... select(address_table.c.email_address).
- ... select_from(user_table).
- ... join(address_table, user_table.c.id == address_table.c.user_id)
+ ... select(address_table.c.email_address)
+ ... .select_from(user_table)
+ ... .join(address_table, user_table.c.id == address_table.c.user_id)
... )
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
:paramref:`_sql.Select.join.full` which will render LEFT OUTER JOIN
and FULL OUTER JOIN, respectively::
- >>> print(
- ... select(user_table).join(address_table, isouter=True)
- ... )
+ >>> print(select(user_table).join(address_table, isouter=True))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account LEFT OUTER JOIN address ON user_account.id = address.user_id{stop}
- >>> print(
- ... select(user_table).join(address_table, full=True)
- ... )
+ >>> print(select(user_table).join(address_table, full=True))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account FULL OUTER JOIN address ON user_account.id = address.user_id{stop}
>>> with engine.connect() as conn:
... result = conn.execute(
- ... select(User.name, func.count(Address.id).label("count")).
- ... join(Address).
- ... group_by(User.name).
- ... having(func.count(Address.id) > 1)
+ ... select(User.name, func.count(Address.id).label("count"))
+ ... .join(Address)
+ ... .group_by(User.name)
+ ... .having(func.count(Address.id) > 1)
... )
... print(result.all())
{opensql}BEGIN (implicit)
.. sourcecode:: pycon+sql
>>> from sqlalchemy import func, desc
- >>> stmt = select(
- ... Address.user_id,
- ... func.count(Address.id).label('num_addresses')).\
- ... group_by("user_id").order_by("user_id", desc("num_addresses"))
+ >>> stmt = (
+ ... select(Address.user_id, func.count(Address.id).label("num_addresses"))
+ ... .group_by("user_id")
+ ... .order_by("user_id", desc("num_addresses"))
+ ... )
>>> print(stmt)
{opensql}SELECT address.user_id, count(address.id) AS num_addresses
FROM address GROUP BY address.user_id ORDER BY address.user_id, num_addresses DESC
>>> user_alias_1 = user_table.alias()
>>> user_alias_2 = user_table.alias()
>>> print(
- ... select(user_alias_1.c.name, user_alias_2.c.name).
- ... join_from(user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id)
+ ... select(user_alias_1.c.name, user_alias_2.c.name).join_from(
+ ... user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id
+ ... )
... )
{opensql}SELECT user_account_1.name, user_account_2.name AS name_1
FROM user_account AS user_account_1
>>> address_alias_1 = aliased(Address)
>>> address_alias_2 = aliased(Address)
>>> print(
- ... select(User).
- ... join_from(User, address_alias_1).
- ... where(address_alias_1.email_address == 'patrick@aol.com').
- ... join_from(User, address_alias_2).
- ... where(address_alias_2.email_address == 'patrick@gmail.com')
+ ... select(User)
+ ... .join_from(User, address_alias_1)
+ ... .where(address_alias_1.email_address == "patrick@aol.com")
+ ... .join_from(User, address_alias_2)
+ ... .where(address_alias_2.email_address == "patrick@gmail.com")
... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
of rows from the ``address`` table (aggregate functions and GROUP BY were
introduced previously at :ref:`tutorial_group_by_w_aggregates`):
- >>> subq = select(
- ... func.count(address_table.c.id).label("count"),
- ... address_table.c.user_id
- ... ).group_by(address_table.c.user_id).subquery()
+ >>> subq = (
+ ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id)
+ ... .group_by(address_table.c.user_id)
+ ... .subquery()
+ ... )
Stringifying the subquery by itself without it being embedded inside of another
:class:`_sql.Select` or other statement produces the plain SELECT statement
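A sketch of that plain form::

    >>> print(subq)
    SELECT count(address.id) AS count, address.user_id
    FROM address GROUP BY address.user_id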
the object to a larger :class:`_sql.Select` that will join the data to
the ``user_account`` table::
- >>> stmt = select(
- ... user_table.c.name,
- ... user_table.c.fullname,
- ... subq.c.count
- ... ).join_from(user_table, subq)
+ >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from(
+ ... user_table, subq
+ ... )
>>> print(stmt)
{opensql}SELECT user_account.name, user_account.fullname, anon_1.count
element in the same way, but the SQL rendered is the very different common
table expression syntax::
- >>> subq = select(
- ... func.count(address_table.c.id).label("count"),
- ... address_table.c.user_id
- ... ).group_by(address_table.c.user_id).cte()
+ >>> subq = (
+ ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id)
+ ... .group_by(address_table.c.user_id)
+ ... .cte()
+ ... )
- >>> stmt = select(
- ... user_table.c.name,
- ... user_table.c.fullname,
- ... subq.c.count
- ... ).join_from(user_table, subq)
+ >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from(
+ ... user_table, subq
+ ... )
>>> print(stmt)
{opensql}WITH anon_1 AS
.. sourcecode:: python+sql
- >>> subq = select(Address).where(~Address.email_address.like('%@aol.com')).subquery()
+ >>> subq = select(Address).where(~Address.email_address.like("%@aol.com")).subquery()
>>> address_subq = aliased(Address, subq)
- >>> stmt = select(User, address_subq).join_from(User, address_subq).order_by(User.id, address_subq.id)
+ >>> stmt = (
+ ... select(User, address_subq)
+ ... .join_from(User, address_subq)
+ ... .order_by(User.id, address_subq.id)
+ ... )
>>> with Session(engine) as session:
... for user, address in session.execute(stmt):
... print(f"{user} {address}")
.. sourcecode:: python+sql
- >>> cte_obj = select(Address).where(~Address.email_address.like('%@aol.com')).cte()
+ >>> cte_obj = select(Address).where(~Address.email_address.like("%@aol.com")).cte()
>>> address_cte = aliased(Address, cte_obj)
- >>> stmt = select(User, address_cte).join_from(User, address_cte).order_by(User.id, address_cte.id)
+ >>> stmt = (
+ ... select(User, address_cte)
+ ... .join_from(User, address_cte)
+ ... .order_by(User.id, address_cte.id)
+ ... )
>>> with Session(engine) as session:
... for user, address in session.execute(stmt):
... print(f"{user} {address}")
method as below. Its default string form, when stringified by itself,
renders as an ordinary SELECT statement that is selecting from two tables::
- >>> subq = select(func.count(address_table.c.id)).\
- ... where(user_table.c.id == address_table.c.user_id).\
- ... scalar_subquery()
+ >>> subq = (
+ ... select(func.count(address_table.c.id))
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .scalar_subquery()
+ ... )
>>> print(subq)
{opensql}(SELECT count(address.id) AS count_1
FROM address, user_account
However, in the case where the correlation is ambiguous, SQLAlchemy will let
us know that more clarity is needed::
- >>> stmt = select(
- ... user_table.c.name,
- ... address_table.c.email_address,
- ... subq.label("address_count")
- ... ).\
- ... join_from(user_table, address_table).\
- ... order_by(user_table.c.id, address_table.c.id)
+ >>> stmt = (
+ ... select(
+ ... user_table.c.name, address_table.c.email_address, subq.label("address_count")
+ ... )
+ ... .join_from(user_table, address_table)
+ ... .order_by(user_table.c.id, address_table.c.id)
+ ... )
>>> print(stmt)
Traceback (most recent call last):
...
this using the :meth:`_sql.ScalarSelect.correlate` or
:meth:`_sql.ScalarSelect.correlate_except` methods::
- >>> subq = select(func.count(address_table.c.id)).\
- ... where(user_table.c.id == address_table.c.user_id).\
- ... scalar_subquery().correlate(user_table)
+ >>> subq = (
+ ... select(func.count(address_table.c.id))
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .scalar_subquery()
+ ... .correlate(user_table)
+ ... )
The statement then can return the data for this column like any other:
... select(
... user_table.c.name,
... address_table.c.email_address,
- ... subq.label("address_count")
- ... ).
- ... join_from(user_table, address_table).
- ... order_by(user_table.c.id, address_table.c.id)
+ ... subq.label("address_count"),
+ ... )
+ ... .join_from(user_table, address_table)
+ ... .order_by(user_table.c.id, address_table.c.id)
... )
... print(result.all())
{opensql}BEGIN (implicit)
was discussed in the previous section::
>>> subq = (
- ... select(
- ... func.count(address_table.c.id).label("address_count"),
- ... address_table.c.email_address,
- ... address_table.c.user_id,
- ... ).
- ... where(user_table.c.id == address_table.c.user_id).
- ... lateral()
+ ... select(
+ ... func.count(address_table.c.id).label("address_count"),
+ ... address_table.c.email_address,
+ ... address_table.c.user_id,
+ ... )
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .lateral()
+ ... )
+ >>> stmt = (
+ ... select(user_table.c.name, subq.c.address_count, subq.c.email_address)
+ ... .join_from(user_table, subq)
+ ... .order_by(user_table.c.id, subq.c.email_address)
... )
- >>> stmt = select(
- ... user_table.c.name,
- ... subq.c.address_count,
- ... subq.c.email_address
- ... ).\
- ... join_from(user_table, subq).\
- ... order_by(user_table.c.id, subq.c.email_address)
>>> print(stmt)
{opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address
FROM user_account
:meth:`_engine.Connection.execute`::
>>> from sqlalchemy import union_all
- >>> stmt1 = select(user_table).where(user_table.c.name == 'sandy')
- >>> stmt2 = select(user_table).where(user_table.c.name == 'spongebob')
+ >>> stmt1 = select(user_table).where(user_table.c.name == "sandy")
+ >>> stmt2 = select(user_table).where(user_table.c.name == "spongebob")
>>> u = union_all(stmt1, stmt2)
>>> with engine.connect() as conn:
... result = conn.execute(u)
>>> u_subq = u.subquery()
>>> stmt = (
- ... select(u_subq.c.name, address_table.c.email_address).
- ... join_from(address_table, u_subq).
- ... order_by(u_subq.c.name, address_table.c.email_address)
+ ... select(u_subq.c.name, address_table.c.email_address)
+ ... .join_from(address_table, u_subq)
+ ... .order_by(u_subq.c.name, address_table.c.email_address)
... )
>>> with engine.connect() as conn:
... result = conn.execute(stmt)
execute; this statement should be composed against the target
ORM entities or their underlying mapped :class:`_schema.Table` objects::
- >>> stmt1 = select(User).where(User.name == 'sandy')
- >>> stmt2 = select(User).where(User.name == 'spongebob')
+ >>> stmt1 = select(User).where(User.name == "sandy")
+ >>> stmt2 = select(User).where(User.name == "spongebob")
>>> u = union_all(stmt1, stmt2)
For a simple SELECT with UNION that is not already nested inside of a
.. sourcecode:: pycon+sql
>>> subq = (
- ... select(func.count(address_table.c.id)).
- ... where(user_table.c.id == address_table.c.user_id).
- ... group_by(address_table.c.user_id).
- ... having(func.count(address_table.c.id) > 1)
+ ... select(func.count(address_table.c.id))
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .group_by(address_table.c.user_id)
+ ... .having(func.count(address_table.c.id) > 1)
... ).exists()
>>> with engine.connect() as conn:
- ... result = conn.execute(
- ... select(user_table.c.name).where(subq)
- ... )
+ ... result = conn.execute(select(user_table.c.name).where(subq))
... print(result.all())
{opensql}BEGIN (implicit)
SELECT user_account.name
.. sourcecode:: pycon+sql
>>> subq = (
- ... select(address_table.c.id).
- ... where(user_table.c.id == address_table.c.user_id)
+ ... select(address_table.c.id).where(user_table.c.id == address_table.c.user_id)
... ).exists()
>>> with engine.connect() as conn:
- ... result = conn.execute(
- ... select(user_table.c.name).where(~subq)
- ... )
+ ... result = conn.execute(select(user_table.c.name).where(~subq))
... print(result.all())
{opensql}BEGIN (implicit)
SELECT user_account.name
.. sourcecode:: pycon+sql
- >>> stmt = select(
- ... func.row_number().over(partition_by=user_table.c.name),
- ... user_table.c.name,
- ... address_table.c.email_address
- ... ).select_from(user_table).join(address_table)
+ >>> stmt = (
+ ... select(
+ ... func.row_number().over(partition_by=user_table.c.name),
+ ... user_table.c.name,
+ ... address_table.c.email_address,
+ ... )
+ ... .select_from(user_table)
+ ... .join(address_table)
+ ... )
>>> with engine.connect() as conn: # doctest:+SKIP
... result = conn.execute(stmt)
... print(result.all())
.. sourcecode:: pycon+sql
- >>> stmt = select(
- ... func.count().over(order_by=user_table.c.name),
- ... user_table.c.name,
- ... address_table.c.email_address).select_from(user_table).join(address_table)
+ >>> stmt = (
+ ... select(
+ ... func.count().over(order_by=user_table.c.name),
+ ... user_table.c.name,
+ ... address_table.c.email_address,
+ ... )
+ ... .select_from(user_table)
+ ... .join(address_table)
+ ... )
>>> with engine.connect() as conn: # doctest:+SKIP
... result = conn.execute(stmt)
... print(result.all())
>>> print(
... func.unnest(
- ... func.percentile_disc([0.25,0.5,0.75,1]).within_group(user_table.c.name)
+ ... func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(user_table.c.name)
... )
... )
unnest(percentile_disc(:percentile_disc_1) WITHIN GROUP (ORDER BY user_account.name))
particular subset of rows compared to the total range of rows returned, available
using the :meth:`_functions.FunctionElement.filter` method::
- >>> stmt = select(
- ... func.count(address_table.c.email_address).filter(user_table.c.name == 'sandy'),
- ... func.count(address_table.c.email_address).filter(user_table.c.name == 'spongebob')
- ... ).select_from(user_table).join(address_table)
+ >>> stmt = (
+ ... select(
+ ... func.count(address_table.c.email_address).filter(user_table.c.name == "sandy"),
+ ... func.count(address_table.c.email_address).filter(
+ ... user_table.c.name == "spongebob"
+ ... ),
+ ... )
+ ... .select_from(user_table)
+ ... .join(address_table)
+ ... )
>>> with engine.connect() as conn: # doctest:+SKIP
... result = conn.execute(stmt)
... print(result.all())
>>> from sqlalchemy import JSON
>>> from sqlalchemy import type_coerce
>>> from sqlalchemy.dialects import mysql
- >>> s = select(
- ... type_coerce(
- ... {'some_key': {'foo': 'bar'}}, JSON
- ... )['some_key']
- ... )
+ >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"])
>>> print(s.compile(dialect=mysql.dialect()))
SELECT JSON_EXTRACT(%s, %s) AS anon_1
>>> from sqlalchemy import update
>>> stmt = (
- ... update(user_table).where(user_table.c.name == 'patrick').
- ... values(fullname='Patrick the Star')
+ ... update(user_table)
+ ... .where(user_table.c.name == "patrick")
+ ... .values(fullname="Patrick the Star")
... )
>>> print(stmt)
{opensql}UPDATE user_account SET fullname=:fullname WHERE user_account.name = :name_1
UPDATE supports all the major SQL forms of UPDATE, including updates against expressions,
where we can make use of :class:`_schema.Column` expressions::
- >>> stmt = (
- ... update(user_table).
- ... values(fullname="Username: " + user_table.c.name)
- ... )
+ >>> stmt = update(user_table).values(fullname="Username: " + user_table.c.name)
>>> print(stmt)
{opensql}UPDATE user_account SET fullname=(:name_1 || user_account.name)
>>> from sqlalchemy import bindparam
>>> stmt = (
- ... update(user_table).
- ... where(user_table.c.name == bindparam('oldname')).
- ... values(name=bindparam('newname'))
+ ... update(user_table)
+ ... .where(user_table.c.name == bindparam("oldname"))
+ ... .values(name=bindparam("newname"))
... )
>>> with engine.begin() as conn:
- ... conn.execute(
- ... stmt,
- ... [
- ... {'oldname':'jack', 'newname':'ed'},
- ... {'oldname':'wendy', 'newname':'mary'},
- ... {'oldname':'jim', 'newname':'jake'},
- ... ]
- ... )
+ ... conn.execute(
+ ... stmt,
+ ... [
+ ... {"oldname": "jack", "newname": "ed"},
+ ... {"oldname": "wendy", "newname": "mary"},
+ ... {"oldname": "jim", "newname": "jake"},
+ ... ],
+ ... )
{opensql}BEGIN (implicit)
UPDATE user_account SET name=? WHERE user_account.name = ?
[...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim'))
anywhere a column expression might be placed::
>>> scalar_subq = (
- ... select(address_table.c.email_address).
- ... where(address_table.c.user_id == user_table.c.id).
- ... order_by(address_table.c.id).
- ... limit(1).
- ... scalar_subquery()
+ ... select(address_table.c.email_address)
+ ... .where(address_table.c.user_id == user_table.c.id)
+ ... .order_by(address_table.c.id)
+ ... .limit(1)
+ ... .scalar_subquery()
... )
>>> update_stmt = update(user_table).values(fullname=scalar_subq)
>>> print(update_stmt)
WHERE clause of the statement::
>>> update_stmt = (
- ... update(user_table).
- ... where(user_table.c.id == address_table.c.user_id).
- ... where(address_table.c.email_address == 'patrick@aol.com').
- ... values(fullname='Pat')
- ... )
+ ... update(user_table)
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .where(address_table.c.email_address == "patrick@aol.com")
+ ... .values(fullname="Pat")
+ ... )
>>> print(update_stmt)
{opensql}UPDATE user_account SET fullname=:fullname FROM address
WHERE user_account.id = address.user_id AND address.email_address = :email_address_1
order to refer to additional tables::
>>> update_stmt = (
- ... update(user_table).
- ... where(user_table.c.id == address_table.c.user_id).
- ... where(address_table.c.email_address == 'patrick@aol.com').
- ... values(
- ... {
- ... user_table.c.fullname: "Pat",
- ... address_table.c.email_address: "pat@aol.com"
- ... }
- ... )
- ... )
+ ... update(user_table)
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .where(address_table.c.email_address == "patrick@aol.com")
+ ... .values(
+ ... {user_table.c.fullname: "Pat", address_table.c.email_address: "pat@aol.com"}
+ ... )
+ ... )
>>> from sqlalchemy.dialects import mysql
>>> print(update_stmt.compile(dialect=mysql.dialect()))
{opensql}UPDATE user_account, address
case, the :meth:`_sql.Update.ordered_values` method accepts a sequence of
tuples so that this order may be controlled [2]_::
- >>> update_stmt = (
- ... update(some_table).
- ... ordered_values(
- ... (some_table.c.y, 20),
- ... (some_table.c.x, some_table.c.y + 10)
- ... )
+ >>> update_stmt = update(some_table).ordered_values(
+ ... (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)
... )
>>> print(update_stmt)
{opensql}UPDATE some_table SET y=:y, x=(some_table.y + :y_1)
::
>>> from sqlalchemy import delete
- >>> stmt = delete(user_table).where(user_table.c.name == 'patrick')
+ >>> stmt = delete(user_table).where(user_table.c.name == "patrick")
>>> print(stmt)
{opensql}DELETE FROM user_account WHERE user_account.name = :name_1
syntaxes, such as ``DELETE FROM..USING`` on MySQL::
>>> delete_stmt = (
- ... delete(user_table).
- ... where(user_table.c.id == address_table.c.user_id).
- ... where(address_table.c.email_address == 'patrick@aol.com')
- ... )
+ ... delete(user_table)
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .where(address_table.c.email_address == "patrick@aol.com")
+ ... )
>>> from sqlalchemy.dialects import mysql
>>> print(delete_stmt.compile(dialect=mysql.dialect()))
{opensql}DELETE FROM user_account USING user_account, address
>>> with engine.begin() as conn:
... result = conn.execute(
- ... update(user_table).
- ... values(fullname="Patrick McStar").
- ... where(user_table.c.name == 'patrick')
+ ... update(user_table)
+ ... .values(fullname="Patrick McStar")
+ ... .where(user_table.c.name == "patrick")
... )
... print(result.rowcount)
{opensql}BEGIN (implicit)
>>> update_stmt = (
- ... update(user_table).where(user_table.c.name == 'patrick').
- ... values(fullname='Patrick the Star').
- ... returning(user_table.c.id, user_table.c.name)
+ ... update(user_table)
+ ... .where(user_table.c.name == "patrick")
+ ... .values(fullname="Patrick the Star")
+ ... .returning(user_table.c.id, user_table.c.name)
... )
>>> print(update_stmt)
{opensql}UPDATE user_account SET fullname=:fullname
RETURNING user_account.id, user_account.name{stop}
>>> delete_stmt = (
- ... delete(user_table).where(user_table.c.name == 'patrick').
- ... returning(user_table.c.id, user_table.c.name)
+ ... delete(user_table)
+ ... .where(user_table.c.name == "patrick")
+ ... .returning(user_table.c.id, user_table.c.name)
... )
>>> print(delete_stmt)
{opensql}DELETE FROM user_account
... conn.execute(text("CREATE TABLE some_table (x int, y int)"))
... conn.execute(
... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
- ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}]
+ ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}],
... )
... conn.commit()
{opensql}BEGIN (implicit)
>>> with engine.begin() as conn:
... conn.execute(
... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
- ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}]
+ ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}],
... )
{opensql}BEGIN (implicit)
INSERT INTO some_table (x, y) VALUES (?, ?)
result = conn.execute(text("select x, y from some_table"))
for dict_row in result.mappings():
- x = dict_row['x']
- y = dict_row['y']
+ x = dict_row["x"]
+ y = dict_row["y"]
..
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... result = conn.execute(
- ... text("SELECT x, y FROM some_table WHERE y > :y"),
- ... {"y": 2}
- ... )
+ ... result = conn.execute(text("SELECT x, y FROM some_table WHERE y > :y"), {"y": 2})
... for row in result:
- ... print(f"x: {row.x} y: {row.y}")
+ ... print(f"x: {row.x} y: {row.y}")
{opensql}BEGIN (implicit)
SELECT x, y FROM some_table WHERE y > ?
[...] (2,)
>>> with engine.connect() as conn:
... conn.execute(
... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
- ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}]
+ ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}],
... )
... conn.commit()
{opensql}BEGIN (implicit)
>>> with Session(engine) as session:
... result = session.execute(stmt, {"y": 6})
... for row in result:
- ... print(f"x: {row.x} y: {row.y}")
+ ... print(f"x: {row.x} y: {row.y}")
{opensql}BEGIN (implicit)
SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y
[...] (6,){stop}
>>> with Session(engine) as session:
... result = session.execute(
... text("UPDATE some_table SET y=:y WHERE x=:x"),
- ... [{"x": 9, "y":11}, {"x": 13, "y": 15}]
+ ... [{"x": 9, "y": 11}, {"x": 13, "y": 15}],
... )
... session.commit()
{opensql}BEGIN (implicit)
>>> user_table = Table(
... "user_account",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30)),
- ... Column('fullname', String)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30)),
+ ... Column("fullname", String),
... )
We can observe that the above :class:`_schema.Table` construct looks a lot like
>>> address_table = Table(
... "address",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', ForeignKey('user_account.id'), nullable=False),
- ... Column('email_address', String, nullable=False)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", ForeignKey("user_account.id"), nullable=False),
+ ... Column("email_address", String, nullable=False),
... )
The table above also features a third kind of constraint, which in SQL is the
:func:`_orm.declarative_base` function::
from sqlalchemy.orm import declarative_base
+
Base = declarative_base()
..
>>> from sqlalchemy.orm import relationship
>>> class User(Base):
- ... __tablename__ = 'user_account'
+ ... __tablename__ = "user_account"
...
... id = Column(Integer, primary_key=True)
... name = Column(String(30))
... addresses = relationship("Address", back_populates="user")
...
... def __repr__(self):
- ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
+ ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
>>> class Address(Base):
- ... __tablename__ = 'address'
+ ... __tablename__ = "address"
...
... id = Column(Integer, primary_key=True)
... email_address = Column(String, nullable=False)
- ... user_id = Column(Integer, ForeignKey('user_account.id'))
+ ... user_id = Column(Integer, ForeignKey("user_account.id"))
...
... user = relationship("User", back_populates="addresses")
...
# declarative base
Base.metadata.create_all(engine)
-
Combining Core Table Declarations with ORM Declarative
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
mapper_registry = registry()
Base = mapper_registry.generate_base()
+
class User(Base):
__table__ = user_table
def __repr__(self):
return f"User({self.name!r}, {self.fullname!r})"
+
class Address(Base):
__table__ = address_table
.. sourcecode:: pycon+sql
- >>> sandy_fullname = session.execute(
- ... select(User.fullname).where(User.id == 2)
- ... ).scalar_one()
+ >>> sandy_fullname = session.execute(select(User.fullname).where(User.id == 2)).scalar_one()
{opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ?
[...] ('Sandy Squirrel', 2)
SELECT user_account.fullname
.. sourcecode:: pycon+sql
>>> session.execute(
- ... update(User).
- ... where(User.name == "sandy").
- ... values(fullname="Sandy Squirrel Extraordinaire")
+ ... update(User)
+ ... .where(User.name == "sandy")
+ ... .values(fullname="Sandy Squirrel Extraordinaire")
... )
{opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ?
[...] ('Sandy Squirrel Extraordinaire', 'sandy'){stop}
.. sourcecode:: pycon+sql
- {sql}>>> session.execute(select(User).where(User.name == 'patrick')).scalar_one() is patrick
+ {sql}>>> session.execute(select(User).where(User.name == "patrick")).scalar_one() is patrick
SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
WHERE user_account.name = ?
.. sourcecode:: python
from sqlalchemy.orm import relationship
+
+
class User(Base):
- __tablename__ = 'user_account'
+ __tablename__ = "user_account"
# ... Column mappings
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
# ... Column mappings
user = relationship("User", back_populates="addresses")
-
Above, the ``User`` class now has an attribute ``User.addresses`` and the
``Address`` class has an attribute ``Address.user``. The
:func:`_orm.relationship` construct will be used to inspect the table
of objects. If we make a new ``User`` object, we can note that there is a
Python list when we access the ``.addresses`` element::
- >>> u1 = User(name='pkrabs', fullname='Pearl Krabs')
+ >>> u1 = User(name="pkrabs", fullname="Pearl Krabs")
>>> u1.addresses
[]
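As a minimal sketch (not part of the rendered doctest run; the address value
is illustrative), appending an ``Address`` to this collection also populates
the ``Address.user`` attribute in memory on the other side, since the two
:func:`_orm.relationship` constructs refer to each other via
``back_populates``::

    # "pearl@aol.com" is an illustrative value
    a1 = Address(email_address="pearl@aol.com")
    u1.addresses.append(a1)
    assert a1.user is u1  # back_populates synchronized the other side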
argument** to :meth:`_sql.Select.join`, where it serves to indicate both the
right side of the join as well as the ON clause at once::
- >>> print(
- ... select(Address.email_address).
- ... select_from(User).
- ... join(User.addresses)
- ... )
+ >>> print(select(Address.email_address).select_from(User).join(User.addresses))
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
between the two mapped :class:`_schema.Table` objects, not because of the
:func:`_orm.relationship` objects on the ``User`` and ``Address`` classes::
- >>> print(
- ... select(Address.email_address).
- ... join_from(User, Address)
- ... )
+ >>> print(select(Address.email_address).join_from(User, Address))
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
using the :func:`_orm.relationship` attributes to join instead::
>>> print(
- ... select(User).
- ... join(User.addresses.of_type(address_alias_1)).
- ... where(address_alias_1.email_address == 'patrick@aol.com').
- ... join(User.addresses.of_type(address_alias_2)).
- ... where(address_alias_2.email_address == 'patrick@gmail.com')
- ... )
+ ... select(User)
+ ... .join(User.addresses.of_type(address_alias_1))
+ ... .where(address_alias_1.email_address == "patrick@aol.com")
+ ... .join(User.addresses.of_type(address_alias_2))
+ ... .where(address_alias_2.email_address == "patrick@gmail.com")
+ ... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
JOIN address AS address_1 ON user_account.id = address_1.user_id
construct directly::
>>> user_alias_1 = aliased(User)
- >>> print(
- ... select(user_alias_1.name).
- ... join(user_alias_1.addresses)
- ... )
+ >>> print(select(user_alias_1.name).join(user_alias_1.addresses))
{opensql}SELECT user_account_1.name
FROM user_account AS user_account_1
JOIN address ON user_account_1.id = address.user_id
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(User.fullname).
- ... join(User.addresses.and_(Address.email_address == 'pearl.krabs@gmail.com'))
+ >>> stmt = select(User.fullname).join(
+ ... User.addresses.and_(Address.email_address == "pearl.krabs@gmail.com")
... )
>>> session.execute(stmt).all()
{opensql}SELECT user_account.fullname
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(User.fullname).
- ... where(User.addresses.any(Address.email_address == 'pearl.krabs@gmail.com'))
+ >>> stmt = select(User.fullname).where(
+ ... User.addresses.any(Address.email_address == "pearl.krabs@gmail.com")
... )
>>> session.execute(stmt).all()
{opensql}SELECT user_account.fullname
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(User.fullname).
- ... where(~User.addresses.any())
- ... )
+ >>> stmt = select(User.fullname).where(~User.addresses.any())
>>> session.execute(stmt).all()
{opensql}SELECT user_account.fullname
FROM user_account
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(Address.email_address).
- ... where(Address.user.has(User.name=="pkrabs"))
- ... )
+ >>> stmt = select(Address.email_address).where(Address.user.has(User.name == "pkrabs"))
>>> session.execute(stmt).all()
{opensql}SELECT address.email_address
FROM address
.. sourcecode:: python
from sqlalchemy.orm import relationship
+
+
class User(Base):
- __tablename__ = 'user_account'
+ __tablename__ = "user_account"
addresses = relationship("Address", back_populates="user", lazy="selectin")
.. sourcecode:: pycon+sql
>>> from sqlalchemy.orm import selectinload
- >>> stmt = (
- ... select(User).options(selectinload(User.addresses)).order_by(User.id)
- ... )
+ >>> stmt = select(User).options(selectinload(User.addresses)).order_by(User.id)
>>> for row in session.execute(stmt):
- ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})")
+ ... print(
+ ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})"
+ ... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account ORDER BY user_account.id
[...] ()
>>> from sqlalchemy.orm import joinedload
>>> stmt = (
- ... select(Address).options(joinedload(Address.user, innerjoin=True)).order_by(Address.id)
+ ... select(Address)
+ ... .options(joinedload(Address.user, innerjoin=True))
+ ... .order_by(Address.id)
... )
>>> for row in session.execute(stmt):
... print(f"{row.Address.email_address} {row.Address.user.name}")
>>> from sqlalchemy.orm import contains_eager
>>> stmt = (
- ... select(Address).
- ... join(Address.user).
- ... where(User.name == 'pkrabs').
- ... options(contains_eager(Address.user)).order_by(Address.id)
+ ... select(Address)
+ ... .join(Address.user)
+ ... .where(User.name == "pkrabs")
+ ... .options(contains_eager(Address.user))
+ ... .order_by(Address.id)
... )
>>> for row in session.execute(stmt):
... print(f"{row.Address.email_address} {row.Address.user.name}")
SQL query that unnecessarily joins twice::
>>> stmt = (
- ... select(Address).
- ... join(Address.user).
- ... where(User.name == 'pkrabs').
- ... options(joinedload(Address.user)).order_by(Address.id)
+ ... select(Address)
+ ... .join(Address.user)
+ ... .where(User.name == "pkrabs")
+ ... .options(joinedload(Address.user))
+ ... .order_by(Address.id)
... )
>>> print(stmt) # SELECT has a JOIN and LEFT OUTER JOIN unnecessarily
{opensql}SELECT address.id, address.email_address, address.user_id,
>>> from sqlalchemy.orm import selectinload
>>> stmt = (
- ... select(User).
- ... options(
- ... selectinload(
- ... User.addresses.and_(
- ... ~Address.email_address.endswith("sqlalchemy.org")
- ... )
- ... )
- ... ).
- ... order_by(User.id).
- ... execution_options(populate_existing=True)
+ ... select(User)
+ ... .options(
+ ... selectinload(
+ ... User.addresses.and_(~Address.email_address.endswith("sqlalchemy.org"))
+ ... )
+ ... )
+ ... .order_by(User.id)
+ ... .execution_options(populate_existing=True)
... )
>>> for row in session.execute(stmt):
- ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})")
+ ... print(
+ ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})"
+ ... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account ORDER BY user_account.id
[...] ()
.. sourcecode:: python
class User(Base):
- __tablename__ = 'user_account'
+ __tablename__ = "user_account"
# ... Column mappings
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
# ... Column mappings
user = relationship("User", back_populates="addresses", lazy="raise_on_sql")
-
Using such a mapping, the application is blocked from lazy loading,
indicating that a particular query would need to specify a loader strategy:
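As a hedged sketch using the entities from the examples above, a query that
names an explicit eager-load strategy satisfies such a mapping, whereas plain
attribute access of ``Address.user`` on a loaded object would raise, since the
lazy load is blocked from emitting SQL::

    # selectinload is one strategy that loads Address.user up front,
    # so no lazy-load SQL is ever needed
    stmt = select(Address).options(selectinload(Address.user))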
--- /dev/null
+from argparse import ArgumentParser
+from argparse import RawDescriptionHelpFormatter
+from collections.abc import Iterator
+from pathlib import Path
+import re
+
+from black import format_str
+from black.const import DEFAULT_LINE_LENGTH
+from black.files import parse_pyproject_toml
+from black.mode import Mode
+from black.mode import TargetVersion
+
+
+home = Path(__file__).parent.parent
+
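+# each block entry is: (original line, line number, indentation padding,
+# optional {sql}-style marker prefix, code text with prompt/padding removed)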
+_Block = list[
+ tuple[
+ str,
+ int,
+ str | None,
+ str | None,
+ str,
+ ]
+]
+
+
+def _format_block(
+ input_block: _Block,
+ exit_on_error: bool,
+ errors: list[tuple[int, str, Exception]],
+ is_doctest: bool,
+) -> list[str]:
+ if not is_doctest:
+        # The first line may have additional padding. Remove it here and restore it later
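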
+ add_padding = start_space.match(input_block[0][4]).groups()[0]
+ skip = len(add_padding)
+ code = "\n".join(
+ c[skip:] if c.startswith(add_padding) else c
+ for *_, c in input_block
+ )
+ else:
+ add_padding = None
+ code = "\n".join(c for *_, c in input_block)
+
+ try:
+ formatted = format_str(code, mode=BLACK_MODE)
+ except Exception as e:
+ start_line = input_block[0][1]
+ errors.append((start_line, code, e))
+ if is_doctest:
+ print(
+ "Could not format code block starting at "
+ f"line {start_line}:\n{code}\nError: {e}"
+ )
+ if exit_on_error:
+ print("Exiting since --exit-on-error was passed")
+ raise
+ else:
+ print("Ignoring error")
+ elif VERBOSE:
+ print(
+ "Could not format code block starting at "
+ f"line {start_line}:\n---\n{code}\n---Error: {e}"
+ )
+ return [line for line, *_ in input_block]
+ else:
+ formatted_code_lines = formatted.splitlines()
+ padding = input_block[0][2]
+ sql_prefix = input_block[0][3] or ""
+
+ if is_doctest:
+ formatted_lines = [
+ f"{padding}{sql_prefix}>>> {formatted_code_lines[0]}",
+ *(
+ f"{padding}...{' ' if fcl else ''}{fcl}"
+ for fcl in formatted_code_lines[1:]
+ ),
+ ]
+ else:
+ formatted_lines = [
+ f"{padding}{add_padding}{sql_prefix}{formatted_code_lines[0]}",
+ *(
+ f"{padding}{add_padding}{fcl}" if fcl else fcl
+ for fcl in formatted_code_lines[1:]
+ ),
+ ]
+ if not input_block[-1][0] and formatted_lines[-1]:
+ # last line was empty and black removed it. restore it
+ formatted_lines.append("")
+ return formatted_lines
+
+
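+# ``.. format: on`` / ``.. format: off`` comments toggle formatting of the
+# plain code blocks that follow them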
+format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$")
+
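+# doctest prompts (">>>" / "...") and the {sql}/{opensql}/{stop} markers that
+# the docs use to delimit rendered SQL output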
+doctest_code_start = re.compile(r"^(\s+)({(?:opensql|sql|stop)})?>>>\s?(.+)")
+doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)")
+sql_code_start = re.compile(r"^(\s+){(?:open)?sql}")
+sql_code_stop = re.compile(r"^(\s+){stop}")
+
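+# a line that opens an rst literal block: "text::", a python-ish
+# ".. sourcecode::" directive, or a bare "::"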
+start_code_section = re.compile(
+ r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$"
+)
+start_space = re.compile(r"^(\s*)[^ ]?")
+
+
+def format_file(
+ file: Path, exit_on_error: bool, check: bool, no_plain: bool
+) -> tuple[bool, int]:
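+    # scan the file line by line, collecting doctest blocks and plain
+    # indented blocks and running each through black as it is closed out;
+    # returns (file unchanged, number of formatting errors)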
+ buffer = []
+ if not check:
+ print(f"Running file {file} ..", end="")
+ original = file.read_text("utf-8")
+ doctest_block: _Block | None = None
+ plain_block: _Block | None = None
+
+ plain_code_section = False
+ plain_padding = None
+ plain_padding_len = None
+ sql_section = False
+
+ errors = []
+
+ disable_format = False
+ for line_no, line in enumerate(original.splitlines(), 1):
+ # start_code_section requires no spaces at the start
+
+ if start_code_section.match(line.strip()):
+ if plain_block:
+ buffer.extend(
+ _format_block(
+ plain_block, exit_on_error, errors, is_doctest=False
+ )
+ )
+ plain_block = None
+ plain_code_section = True
+ assert not sql_section
+ plain_padding = start_space.match(line).groups()[0]
+ plain_padding_len = len(plain_padding)
+ buffer.append(line)
+ continue
+ elif (
+ plain_code_section
+ and line.strip()
+ and not line.startswith(" " * (plain_padding_len + 1))
+ ):
+ plain_code_section = sql_section = False
+ elif match := format_directive.match(line):
+ disable_format = match.groups()[0] == "off"
+
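+        # an open doctest block: accumulate "..." continuation lines,
+        # otherwise format the collected block and flush it to the buffer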
+ if doctest_block:
+ assert not plain_block
+ if match := doctest_code_continue.match(line):
+ doctest_block.append(
+ (line, line_no, None, None, match.groups()[0])
+ )
+ continue
+ else:
+ buffer.extend(
+ _format_block(
+ doctest_block, exit_on_error, errors, is_doctest=True
+ )
+ )
+ doctest_block = None
+ elif plain_block:
+ if (
+ plain_code_section
+ and not doctest_code_start.match(line)
+ and not sql_code_start.match(line)
+ ):
+ plain_block.append(
+ (line, line_no, None, None, line[plain_padding_len:])
+ )
+ continue
+ else:
+ buffer.extend(
+ _format_block(
+ plain_block, exit_on_error, errors, is_doctest=False
+ )
+ )
+ plain_block = None
+
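+        # dispatch on the current line: a doctest start, a {sql} section
+        # start or stop, the start of a plain block, or a passthrough line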
+ if line and (match := doctest_code_start.match(line)):
+ plain_code_section = sql_section = False
+ if plain_block:
+ buffer.extend(
+ _format_block(
+ plain_block, exit_on_error, errors, is_doctest=False
+ )
+ )
+ plain_block = None
+ padding, code = match.group(1, 3)
+ doctest_block = [(line, line_no, padding, match.group(2), code)]
+ elif (
+ line
+ and plain_code_section
+ and (match := sql_code_start.match(line))
+ ):
+ if plain_block:
+ buffer.extend(
+ _format_block(
+ plain_block, exit_on_error, errors, is_doctest=False
+ )
+ )
+ plain_block = None
+
+ sql_section = True
+ buffer.append(line)
+ elif line and sql_section and (match := sql_code_stop.match(line)):
+ sql_section = False
+ line = line.replace("{stop}", "")
+ assert not doctest_block
+ # start of a plain block
+ if line.strip():
+ plain_block = [
+ (
+ line,
+ line_no,
+ plain_padding,
+ "{stop}",
+ line[plain_padding_len:],
+ )
+ ]
+
+ elif (
+ line
+ and not no_plain
+ and not disable_format
+ and plain_code_section
+ and not sql_section
+ ):
+ assert not doctest_block
+ # start of a plain block
+ plain_block = [
+ (line, line_no, plain_padding, None, line[plain_padding_len:])
+ ]
+ else:
+ buffer.append(line)
+
+ if doctest_block:
+ buffer.extend(
+ _format_block(
+ doctest_block, exit_on_error, errors, is_doctest=True
+ )
+ )
+ if plain_block:
+ buffer.extend(
+ _format_block(plain_block, exit_on_error, errors, is_doctest=False)
+ )
+ if buffer:
+        # an empty buffer would mean something strange happened; in that
+        # case nothing is written (see the else branch below)
+ buffer.append("")
+ updated = "\n".join(buffer)
+ equal = original == updated
+ if not check:
+ print(
+ f"..done. {len(errors)} error(s).",
+ "No changes" if equal else "Changes detected",
+ )
+ if not equal:
+ # write only if there are changes to write
+ file.write_text(updated, "utf-8", newline="\n")
+ else:
+ if not check:
+ print(".. Nothing to write")
+        equal = not original
+
+ if check:
+ if not equal:
+ print(f"File {file} would be formatted")
+ return equal, len(errors)
+
+
+def iter_files(directory) -> Iterator[Path]:
+ yield from (home / directory).glob("./**/*.rst")
+
+
+def main(
+ file: str | None,
+ directory: str,
+ exit_on_error: bool,
+ check: bool,
+ no_plain: bool,
+):
+ if file is not None:
+ result = [format_file(Path(file), exit_on_error, check, no_plain)]
+ else:
+ result = [
+ format_file(doc, exit_on_error, check, no_plain)
+ for doc in iter_files(directory)
+ ]
+
+ if check:
+ formatting_error_counts = [e for _, e in result if e]
+ to_reformat = len([b for b, _ in result if not b])
+
+ if not to_reformat and not formatting_error_counts:
+ print("All files are correctly formatted")
+ exit(0)
+ else:
+ print(
+ f"{to_reformat} file(s) would be reformatted;",
+ (
+ f"{sum(formatting_error_counts)} formatting errors "
+ f"reported in {len(formatting_error_counts)} files"
+ )
+ if formatting_error_counts
+ else "no formatting errors reported",
+ )
+
+ # interim, until we fix all formatting errors
+ if not to_reformat:
+ exit(0)
+ exit(1)
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(
+        description="""Formats code inside docs using black. Supports \
+doctest code blocks, and also tries to format plain code blocks, identified \
+as any indented block of at least 4 spaces, unless '--no-plain' is specified.
+
+Plain code blocks may lead to false positives. To disable formatting in a \
+section of a file, add the comment ``.. format: off``; formatting stays \
+disabled until ``.. format: on`` is encountered or the file ends.
+Another alternative is to use fewer than 4 spaces to indent the code block.
+""",
+ formatter_class=RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "-f", "--file", help="Format only this file instead of all docs"
+ )
+ parser.add_argument(
+ "-d",
+ "--directory",
+ help="Find documents in this directory and its sub dirs",
+ default="doc/build",
+ )
+ parser.add_argument(
+ "-c",
+ "--check",
+ help="Don't write the files back, just return the "
+ "status. Return code 0 means nothing would change. "
+ "Return code 1 means some files would be reformatted.",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-e",
+ "--exit-on-error",
+ help="Exit in case of black format error instead of ignoring it. "
+ "This option is only valid for doctest code blocks",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-l",
+ "--project-line-length",
+ help="Configure the line length to the project value instead "
+ "of using the black default of 88",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ help="Increase verbosity",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-n",
+ "--no-plain",
+ help="Disable plain code blocks formatting that's more difficult "
+ "to parse compared to doctest code blocks",
+ action="store_true",
+ )
+ args = parser.parse_args()
+
+ config = parse_pyproject_toml(home / "pyproject.toml")
+ BLACK_MODE = Mode(
+ target_versions=set(
+ TargetVersion[val.upper()]
+ for val in config.get("target_version", [])
+ if val != "py27"
+ ),
+ line_length=config.get("line_length", DEFAULT_LINE_LENGTH)
+ if args.project_line_length
+ else DEFAULT_LINE_LENGTH,
+ )
+ VERBOSE = args.verbose
+
+ main(
+ args.file,
+ args.directory,
+ args.exit_on_error,
+ args.check,
+ args.no_plain,
+ )
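A hypothetical invocation of the script above (its path in the source tree is
not shown in this hunk) that only reports which files would change, without
writing anything back::

    python format_docs_code.py --check --directory doc/build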