Added a script to format code in the rst documentation using black.
This is also added to the lint tox job to ensure that the code
in the docs stays properly formatted.
Change-Id: I799444f22da153484ca5f095d57755762348da40
0.7 Changelog
=============
+
.. changelog::
:version: 0.7.11
:released:
::
- myengine = create_engine('sqlite://')
+ myengine = create_engine("sqlite://")
meta = MetaData(myengine)
from sqlalchemy import *
+
class UTCDateTime(types.TypeDecorator):
pass
from sqlalchemy import *
from sqlalchemy import types
+
class UTCDateTime(types.TypeDecorator):
pass
::
- session.query(User).filter(and_(User.name == 'fred', User.id > 17))
+ session.query(User).filter(and_(User.name == "fred", User.id > 17))
While simple column-based comparisons are no big deal, the
class attributes have some new "higher level" constructs
# return all users who contain a particular address with
# the email_address like '%foo%'
- filter(User.addresses.any(Address.email_address.like('%foo%')))
+ filter(User.addresses.any(Address.email_address.like("%foo%")))
# same, email address equals 'foo@bar.com'. can fall back to keyword
# args for simple comparisons
- filter(User.addresses.any(email_address = 'foo@bar.com'))
+ filter(User.addresses.any(email_address="foo@bar.com"))
# return all Addresses whose user attribute has the username 'ed'
- filter(Address.user.has(name='ed'))
+ filter(Address.user.has(name="ed"))
# return all Addresses whose user attribute has the username 'ed'
# and an id > 5 (mixing clauses with kwargs)
- filter(Address.user.has(User.id > 5, name='ed'))
+ filter(Address.user.has(User.id > 5, name="ed"))
The ``Column`` collection remains available on mapped
classes in the ``.c`` attribute. Note that property-based
::
# standard self-referential TreeNode mapper with backref
- mapper(TreeNode, tree_nodes, properties={
- 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id))
- })
+ mapper(
+ TreeNode,
+ tree_nodes,
+ properties={
+ "children": relation(
+ TreeNode, backref=backref("parent", remote_side=tree_nodes.id)
+ )
+ },
+ )
# query for node with child containing "bar" two levels deep
- session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar')
+ session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(
+ name="bar"
+ )
To add criteria for each table along the way in an aliased
join, you can use ``from_joinpoint`` to keep joining against
# search for the treenode along the path "n1/n12/n122"
# first find a Node with name="n122"
- q = sess.query(Node).filter_by(name='n122')
+ q = sess.query(Node).filter_by(name="n122")
# then join to parent with "n12"
- q = q.join('parent', aliased=True).filter_by(name='n12')
+ q = q.join("parent", aliased=True).filter_by(name="n12")
# join again to the next parent with 'n1'. use 'from_joinpoint'
# so we join from the previous point, instead of joining off the
# root table
- q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1')
+ q = q.join("parent", aliased=True, from_joinpoint=True).filter_by(name="n1")
node = q.first()
::
- nodes = Table('nodes', metadata,
- Column('id', Integer, primary_key=True),
- Column('parent_id', Integer, ForeignKey('nodes.id')),
- Column('name', String(30)))
+ nodes = Table(
+ "nodes",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("parent_id", Integer, ForeignKey("nodes.id")),
+ Column("name", String(30)),
+ )
+
class TreeNode(object):
pass
- mapper(TreeNode, nodes, properties={
- 'children':relation(TreeNode, lazy=False, join_depth=3)
- })
+
+ mapper(
+ TreeNode,
+ nodes,
+ properties={"children": relation(TreeNode, lazy=False, join_depth=3)},
+ )
So what happens when we say:
def __init__(self, x, y):
self.x = x
self.y = y
+
def __composite_values__(self):
return self.x, self.y
+
def __eq__(self, other):
return other.x == self.x and other.y == self.y
+
def __ne__(self, other):
return not self.__eq__(other)
::
- vertices = Table('vertices', metadata,
- Column('id', Integer, primary_key=True),
- Column('x1', Integer),
- Column('y1', Integer),
- Column('x2', Integer),
- Column('y2', Integer),
- )
+ vertices = Table(
+ "vertices",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("x1", Integer),
+ Column("y1", Integer),
+ Column("x2", Integer),
+ Column("y2", Integer),
+ )
Then, map it! We'll create a ``Vertex`` object which
stores two ``Point`` objects:
self.start = start
self.end = end
- mapper(Vertex, vertices, properties={
- 'start':composite(Point, vertices.c.x1, vertices.c.y1),
- 'end':composite(Point, vertices.c.x2, vertices.c.y2)
- })
+
+ mapper(
+ Vertex,
+ vertices,
+ properties={
+ "start": composite(Point, vertices.c.x1, vertices.c.y1),
+ "end": composite(Point, vertices.c.x2, vertices.c.y2),
+ },
+ )
Once you've set up your composite type, it's usable just
like any other type:
::
- v = Vertex(Point(3, 4), Point(26,15))
+ v = Vertex(Point(3, 4), Point(26, 15))
session.save(v)
session.flush()
# a Document class which uses a composite Version
# object as primary key
- document = query.get(Version(1, 'a'))
+ document = query.get(Version(1, "a"))
``dynamic_loader()`` relations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
- mapper(Foo, foo_table, properties={
- 'bar':relation(Bar)
- })
- mapper(Bar, bar_table, properties={
- 'bat':relation(Bat)
- })
+ mapper(Foo, foo_table, properties={"bar": relation(Bar)})
+ mapper(Bar, bar_table, properties={"bat": relation(Bat)})
mapper(Bat, bat_table)
# eager load bar and bat
- session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all()
+ session.query(Foo).options(eagerload_all("bar.bat")).filter(...).all()
New Collection API
^^^^^^^^^^^^^^^^^^
# use a dictionary relation keyed by a column
relation(Item, collection_class=column_mapped_collection(items.c.keyword))
# or named attribute
- relation(Item, collection_class=attribute_mapped_collection('keyword'))
+ relation(Item, collection_class=attribute_mapped_collection("keyword"))
# or any function you like
relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b))
::
- mapper(User, users, properties={
- 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')),
- 'numposts': column_property(
- select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts')
- )
- })
+ mapper(
+ User,
+ users,
+ properties={
+ "fullname": column_property(
+ (users.c.firstname + users.c.lastname).label("fullname")
+ ),
+ "numposts": column_property(
+ select([func.count(1)], users.c.id == posts.c.user_id)
+ .correlate(users)
+ .label("posts")
+ ),
+ },
+ )
a typical query looks like:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
- engine = create_engine('myengine://')
+ engine = create_engine("myengine://")
Session = sessionmaker(bind=engine, autoflush=True, transactional=True)
# use the new Session() freely
sess.save(someobject)
sess.flush()
-
If you need to post-configure your Session, say with an
engine, add it later with ``configure()``:
Session = scoped_session(sessionmaker(autoflush=True, transactional=True))
Session.configure(bind=engine)
- u = User(name='wendy')
+ u = User(name="wendy")
sess = Session()
sess.save(u)
sess2 = Session()
assert sess is sess2
-
When using a thread-local ``Session``, the returned class
has all of ``Session``'s interface implemented as
classmethods, and "assignmapper"'s functionality is
# "assignmapper"-like functionality available via ScopedSession.mapper
Session.mapper(User, users_table)
- u = User(name='wendy')
+ u = User(name="wendy")
Session.commit()
-
Sessions are again Weak Referencing By Default
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Session = sessionmaker(bind=engine, autoflush=True, transactional=True)
- u = User(name='wendy')
+ u = User(name="wendy")
sess = Session()
sess.save(u)
# wendy is flushed, comes right back from a query
- wendy = sess.query(User).filter_by(name='wendy').one()
+ wendy = sess.query(User).filter_by(name="wendy").one()
Transactional methods moved onto sessions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# use the session
- sess.commit() # commit transaction
+ sess.commit() # commit transaction
Sharing a ``Session`` with an enclosing engine-level (i.e.
non-ORM) transaction is easy:
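A minimal sketch of the pattern (assuming an existing ``engine``; the
full example is abbreviated here)::

    Session = sessionmaker()

    conn = engine.connect()
    trans = conn.begin()

    # the Session joins the connection's in-progress transaction
    sess = Session(bind=conn)

    # ... work with sess; flushes participate in the enclosing transaction

    trans.commit()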
::
- b = bindparam('foo', type_=String)
+ b = bindparam("foo", type_=String)
in\_ Function Changed to Accept Sequence or Selectable
------------------------------------------------------
::
- result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5)
- assert result.out_parameters == {'y':10, 'z':75}
+ result = engine.execute(
+ text(
+ "begin foo(:x, :y, :z); end;",
+ bindparams=[
+ bindparam("x", Numeric),
+ outparam("y", Numeric),
+ outparam("z", Numeric),
+ ],
+ ),
+ x=5,
+ )
+ assert result.out_parameters == {"y": 10, "z": 75}
Connection-bound ``MetaData``, ``Sessions``
-------------------------------------------
::
- session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name)
+ session.query(User.name, func.count(Address.id).label("numaddresses")).join(
+ Address
+ ).group_by(User.name)
The tuples returned by any multi-column/entity query are
*named* tuples:
::
- for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name):
- print("name", row.name, "number", row.numaddresses)
+ for row in (
+ session.query(User.name, func.count(Address.id).label("numaddresses"))
+ .join(Address)
+ .group_by(User.name)
+ ):
+ print("name", row.name, "number", row.numaddresses)
``Query`` has a ``statement`` accessor, as well as a
``subquery()`` method which allow ``Query`` to be used to
::
- mapper(User, users, properties={
- 'addresses':relation(Address, order_by=addresses.c.id)
- }, order_by=users.c.id)
+ mapper(
+ User,
+ users,
+ properties={"addresses": relation(Address, order_by=addresses.c.id)},
+ order_by=users.c.id,
+ )
To set ordering on a backref, use the ``backref()``
function:
::
- 'keywords':relation(Keyword, secondary=item_keywords,
- order_by=keywords.c.name, backref=backref('items', order_by=items.c.id))
+ "keywords": relation(
+ Keyword,
+ secondary=item_keywords,
+ order_by=keywords.c.name,
+ backref=backref("items", order_by=items.c.id),
+ )
Using declarative? To help with the new ``order_by``
requirement, ``order_by`` and friends can now be set using
class MyClass(MyDeclarativeBase):
...
- 'addresses':relation("Address", order_by="Address.id")
+ "addresses": relation("Address", order_by="Address.id")
It's generally a good idea to set ``order_by`` on
``relation()s`` which load list-based collections of
convert_result_value methods
"""
+
def bind_processor(self, dialect):
def convert(value):
return self.convert_bind_param(value, dialect)
+
return convert
def result_processor(self, dialect):
def convert(value):
return self.convert_result_value(value, dialect)
+
return convert
def convert_result_value(self, value, dialect):
::
- create_engine('postgresql://scott:tiger@localhost/test')
+ create_engine("postgresql://scott:tiger@localhost/test")
However, to specify a specific DBAPI backend such as pg8000,
add it to the "protocol" section of the URL using a plus
::
- create_engine('postgresql+pg8000://scott:tiger@localhost/test')
+ create_engine("postgresql+pg8000://scott:tiger@localhost/test")
Important Dialect Links:
::
- from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\
- VARCHAR, MACADDR, DATE, BYTEA
+ from sqlalchemy.dialects.postgresql import (
+ INTEGER,
+ BIGINT,
+ SMALLINT,
+ VARCHAR,
+ MACADDR,
+ DATE,
+ BYTEA,
+ )
Above, ``INTEGER`` is actually the plain ``INTEGER`` type
from ``sqlalchemy.types``, but the PG dialect makes it
::
>>> from sqlalchemy.sql import column
- >>> column('foo') == 5
+ >>> column("foo") == 5
<sqlalchemy.sql.expression._BinaryExpression object at 0x1252490>
This is so that Python expressions produce SQL expressions when
::
- >>> str(column('foo') == 5)
+ >>> str(column("foo") == 5)
'foo = :foo_1'
But what happens if we say this?
::
- >>> if column('foo') == 5:
+ >>> if column("foo") == 5:
... print("yes")
- ...
In previous versions of SQLAlchemy, the returned
``_BinaryExpression`` was a plain Python object which
::
- >>> bool(column('foo') == 5)
+ >>> bool(column("foo") == 5)
False
- >>> bool(column('foo') == column('foo'))
+ >>> bool(column("foo") == column("foo"))
False
- >>> c = column('foo')
+ >>> c = column("foo")
>>> bool(c == c)
True
>>>
::
- connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'})
+ connection.execute(table.insert(), {"data": "row1"}, {"data": "row2"}, {"data": "row3"})
When the ``Connection`` object sends off the given
``insert()`` construct for compilation, it passes to the
::
- connection.execute(table.insert(),
- {'timestamp':today, 'data':'row1'},
- {'timestamp':today, 'data':'row2'},
- {'data':'row3'})
+ connection.execute(
+ table.insert(),
+ {"timestamp": today, "data": "row1"},
+ {"timestamp": today, "data": "row2"},
+ {"data": "row3"},
+ )
Because the third row does not specify the 'timestamp'
column. Previous versions of SQLAlchemy would simply insert
from sqlalchemy.schema import DDL
- DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata)
+ DDL("CREATE TRIGGER users_trigger ...").execute_at("after-create", metadata)
Now the full suite of DDL constructs is available under the
same system, including those for CREATE TABLE, ADD
from sqlalchemy.schema import Constraint, AddConstraint
- AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable)
+ AddConstraint(CheckConstraint("value > 5")).execute_at("after-create", mytable)
Additionally, all the DDL objects are now regular
``ClauseElement`` objects just like any other SQLAlchemy
from sqlalchemy.schema import DDLElement
from sqlalchemy.ext.compiler import compiles
- class AlterColumn(DDLElement):
+ class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
+
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s %s ..." % (
element.column.table.name,
element.column.name,
- element.cmd
+ element.cmd,
)
+
engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'"))
Deprecated/Removed Schema Elements
::
from sqlalchemy.engine.reflection import Inspector
+
insp = Inspector.from_engine(my_engine)
print(insp.get_schema_names())
::
- my_engine = create_engine('postgresql://...')
+ my_engine = create_engine("postgresql://...")
pg_insp = Inspector.from_engine(my_engine)
- print(pg_insp.get_table_oid('my_table'))
+ print(pg_insp.get_table_oid("my_table"))
RETURNING Support
=================
result = connection.execute(
- table.insert().values(data='some data').returning(table.c.id, table.c.timestamp)
- )
+ table.insert().values(data="some data").returning(table.c.id, table.c.timestamp)
+ )
row = result.first()
- print("ID:", row['id'], "Timestamp:", row['timestamp'])
+ print("ID:", row["id"], "Timestamp:", row["timestamp"])
The implementation of RETURNING across the four supported
backends varies wildly, in the case of Oracle requiring an
def process_result_value(self, value, dialect):
if isinstance(value, unicode):
- value = value.encode('utf-8')
+ value = value.encode("utf-8")
return value
Note that the ``assert_unicode`` flag is now deprecated.
::
mapper(Child, child)
- mapper(Parent, parent, properties={
- 'child':relationship(Child, lazy='joined', innerjoin=True)
- })
+ mapper(
+ Parent,
+ parent,
+ properties={"child": relationship(Child, lazy="joined", innerjoin=True)},
+ )
At query time:
::
- query.join(SomeClass, SomeClass.id==ParentClass.some_id)
+ query.join(SomeClass, SomeClass.id == ParentClass.some_id)
In 0.6, this usage was considered to be an error, because
``join()`` accepts multiple arguments corresponding to
::
- Table('mytable', metadata,
- Column('id',Integer, primary_key=True),
- Column('name', String(50), nullable=False),
- Index('idx_name', 'name')
+ Table(
+ "mytable",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50), nullable=False),
+ Index("idx_name", "name"),
)
The primary rationale here is for the benefit of declarative
::
class HasNameMixin(object):
- name = Column('name', String(50), nullable=False)
+ name = Column("name", String(50), nullable=False)
+
@declared_attr
def __table_args__(cls):
- return (Index('name'), {})
+ return (Index("name"), {})
+
class User(HasNameMixin, Base):
- __tablename__ = 'user'
- id = Column('id', Integer, primary_key=True)
+ __tablename__ = "user"
+ id = Column("id", Integer, primary_key=True)
`Indexes <https://www.sqlalchemy.org/docs/07/core/schema.html#indexes>`_
from sqlalchemy.sql import table, column, select, func
- empsalary = table('empsalary',
- column('depname'),
- column('empno'),
- column('salary'))
+ empsalary = table("empsalary", column("depname"), column("empno"), column("salary"))
- s = select([
+ s = select(
+ [
empsalary,
- func.avg(empsalary.c.salary).
- over(partition_by=empsalary.c.depname).
- label('avg')
- ])
+ func.avg(empsalary.c.salary)
+ .over(partition_by=empsalary.c.depname)
+ .label("avg"),
+ ]
+ )
print(s)
::
- query.from_self(func.count(literal_column('1'))).scalar()
+ query.from_self(func.count(literal_column("1"))).scalar()
Previously, internal logic attempted to rewrite the columns
clause of the query itself, and upon detection of a
::
from sqlalchemy import func
+
session.query(func.count(MyClass.id)).scalar()
or for ``count(*)``:
::
from sqlalchemy import func, literal_column
- session.query(func.count(literal_column('*'))).select_from(MyClass).scalar()
+
+ session.query(func.count(literal_column("*"))).select_from(MyClass).scalar()
LIMIT/OFFSET clauses now use bind parameters
--------------------------------------------
from sqlalchemy import select, func
from sqlalchemy.orm import mapper
+
class Subset(object):
pass
+
+
selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias()
mapper(Subset, selectable, primary_key=[selectable.c.x])
::
- Table('mytable', metadata,
+ Table(
+ "mytable",
+ metadata,
# ....
-
- Column('pickled_data', PickleType(mutable=True))
+ Column("pickled_data", PickleType(mutable=True)),
)
The ``mutable=True`` flag is being phased out, in favor of
::
- foobar = foo.join(bar, foo.c.id==bar.c.foo_id)
+ foobar = foo.join(bar, foo.c.id == bar.c.foo_id)
mapper(FooBar, foobar)
This is because the ``mapper()`` refuses to guess what column
::
- foobar = foo.join(bar, foo.c.id==bar.c.foo_id)
- mapper(FooBar, foobar, properties={
- 'id':[foo.c.id, bar.c.id]
- })
+ foobar = foo.join(bar, foo.c.id == bar.c.foo_id)
+ mapper(FooBar, foobar, properties={"id": [foo.c.id, bar.c.id]})
:ticket:`1896`
::
- select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY'])
+ select([mytable], distinct="ALL", prefixes=["HIGH_PRIORITY"])
The ``prefixes`` keyword or ``prefix_with()`` method should
be used for non-standard or unusual prefixes:
::
- select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL')
+ select([mytable]).prefix_with("HIGH_PRIORITY", "ALL")
``useexisting`` superseded by ``extend_existing`` and ``keep_existing``
-----------------------------------------------------------------------
::
class Folder(Base):
- __tablename__ = 'folder'
+ __tablename__ = "folder"
__table_args__ = (
- ForeignKeyConstraint(
- ['account_id', 'parent_id'],
- ['folder.account_id', 'folder.folder_id']),
+ ForeignKeyConstraint(
+ ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"]
+ ),
)
account_id = Column(Integer, primary_key=True)
parent_id = Column(Integer)
name = Column(String)
- parent_folder = relationship("Folder",
- backref="child_folders",
- remote_side=[account_id, folder_id]
- )
+ parent_folder = relationship(
+ "Folder", backref="child_folders", remote_side=[account_id, folder_id]
+ )
Above, the ``Folder`` refers to its parent ``Folder``
joining from ``account_id`` to itself, and ``parent_id``
expected in most cases::
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = Column(Integer, primary_key=True)
ip_address = Column(INET)
content = Column(String(50))
# relationship() using explicit foreign_keys, remote_side
- parent_host = relationship("HostEntry",
- primaryjoin=ip_address == cast(content, INET),
- foreign_keys=content,
- remote_side=ip_address
- )
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=ip_address == cast(content, INET),
+ foreign_keys=content,
+ remote_side=ip_address,
+ )
The new :func:`_orm.relationship` mechanics make use of a
SQLAlchemy concept known as :term:`annotations`. These annotations
from sqlalchemy.orm import foreign, remote
+
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = Column(Integer, primary_key=True)
ip_address = Column(INET)
# relationship() using explicit foreign() and remote() annotations
# in lieu of separate arguments
- parent_host = relationship("HostEntry",
- primaryjoin=remote(ip_address) == \
- cast(foreign(content), INET),
- )
-
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=remote(ip_address) == cast(foreign(content), INET),
+ )
.. seealso::
A walkthrough of some key capabilities follows::
>>> class User(Base):
- ... __tablename__ = 'user'
+ ... __tablename__ = "user"
... id = Column(Integer, primary_key=True)
... name = Column(String)
... name_syn = synonym(name)
... addresses = relationship("Address")
- ...
>>> # universal entry point is inspect()
>>> b = inspect(User)
"user".id = address.user_id
>>> # inspect works on instances
- >>> u1 = User(id=3, name='x')
+ >>> u1 = User(id=3, name="x")
>>> b = inspect(u1)
>>> # it returns the InstanceState
::
from sqlalchemy.orm import with_polymorphic
+
palias = with_polymorphic(Person, [Engineer, Manager])
- session.query(Company).\
- join(palias, Company.employees).\
- filter(or_(Engineer.language=='java', Manager.hair=='pointy'))
+ session.query(Company).join(palias, Company.employees).filter(
+ or_(Engineer.language == "java", Manager.hair == "pointy")
+ )
.. seealso::
# use eager loading in conjunction with with_polymorphic targets
Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
- q = s.query(DataContainer).\
- join(DataContainer.jobs.of_type(Job_P)).\
- options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ q = (
+ s.query(DataContainer)
+ .join(DataContainer.jobs.of_type(Job_P))
+ .options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ )
The method now works equally well in most places a regular relationship
attribute is accepted, including with loader functions like
# use eager loading in conjunction with with_polymorphic targets
Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
- q = s.query(DataContainer).\
- join(DataContainer.jobs.of_type(Job_P)).\
- options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ q = (
+ s.query(DataContainer)
+ .join(DataContainer.jobs.of_type(Job_P))
+ .options(contains_eager(DataContainer.jobs.of_type(Job_P)))
+ )
# pass subclasses to eager loads (implicitly applies with_polymorphic)
- q = s.query(ParentThing).\
- options(
- joinedload_all(
- ParentThing.container,
- DataContainer.jobs.of_type(SubJob)
- ))
+ q = s.query(ParentThing).options(
+ joinedload_all(ParentThing.container, DataContainer.jobs.of_type(SubJob))
+ )
# control self-referential aliasing with any()/has()
Job_A = aliased(Job)
- q = s.query(Job).join(DataContainer.jobs).\
- filter(
- DataContainer.jobs.of_type(Job_A).\
- any(and_(Job_A.id < Job.id, Job_A.type=='fred')
- )
- )
+ q = (
+ s.query(Job)
+ .join(DataContainer.jobs)
+ .filter(
+ DataContainer.jobs.of_type(Job_A).any(
+ and_(Job_A.id < Job.id, Job_A.type == "fred")
+ )
+ )
+ )
.. seealso::
Base = declarative_base()
+
@event.listens_for("load", Base, propagate=True)
def on_load(target, context):
print("New instance loaded:", target)
+
# on_load() will be applied to SomeClass
class SomeClass(Base):
- __tablename__ = 'sometable'
+ __tablename__ = "sometable"
# ...
class Snack(Base):
# ...
- peanuts = relationship("nuts.Peanut",
- primaryjoin="nuts.Peanut.snack_id == Snack.id")
+ peanuts = relationship(
+ "nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id"
+ )
The resolution allows any full or partial
disambiguating package name to be used. If the
class ReflectedOne(DeferredReflection, Base):
__abstract__ = True
+
class ReflectedTwo(DeferredReflection, Base):
__abstract__ = True
+
class MyClass(ReflectedOne):
- __tablename__ = 'mytable'
+ __tablename__ = "mytable"
+
class MyOtherClass(ReflectedOne):
- __tablename__ = 'myothertable'
+ __tablename__ = "myothertable"
+
class YetAnotherClass(ReflectedTwo):
- __tablename__ = 'yetanothertable'
+ __tablename__ = "yetanothertable"
+
ReflectedOne.prepare(engine_one)
ReflectedTwo.prepare(engine_two)
a FROM clause (or equivalent, depending on backend)
against ``SomeOtherEntity``::
- query(SomeEntity).\
- filter(SomeEntity.id==SomeOtherEntity.id).\
- filter(SomeOtherEntity.foo=='bar').\
- update({"data":"x"})
+ query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter(
+ SomeOtherEntity.foo == "bar"
+ ).update({"data": "x"})
In particular, updates to joined-inheritance
entities are supported, provided the target of the UPDATE is local to the
::
- query(Engineer).\
- filter(Person.id==Engineer.id).\
- filter(Person.name=='dilbert').\
- update({"engineer_data":"java"})
+ query(Engineer).filter(Person.id == Engineer.id).filter(
+ Person.name == "dilbert"
+ ).update({"engineer_data": "java"})
would produce:
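(a sketch of the expected SQL, in UPDATE..FROM form; the exact rendering
may vary by backend, and the table names are assumed from the mapping above)::

    UPDATE engineer SET engineer_data='java' FROM person
    WHERE person.id = engineer.id AND person.name = 'dilbert'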
from sqlalchemy.types import Numeric
from sqlalchemy.sql import func
+
class CustomNumeric(Numeric):
class comparator_factory(Numeric.Comparator):
def log(self, other):
::
- data = Table('data', metadata,
- Column('id', Integer, primary_key=True),
- Column('x', CustomNumeric(10, 5)),
- Column('y', CustomNumeric(10, 5))
- )
+ data = Table(
+ "data",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("x", CustomNumeric(10, 5)),
+ Column("y", CustomNumeric(10, 5)),
+ )
stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value)
print(conn.execute(stmt).fetchall())
-
New features which have come from this immediately include
support for PostgreSQL's HSTORE type, as well as new
operations associated with PostgreSQL's ARRAY
not the same thing as the usual ``executemany()`` style of INSERT which
remains unchanged::
- users.insert().values([
- {"name": "some name"},
- {"name": "some other name"},
- {"name": "yet another name"},
- ])
+ users.insert().values(
+ [
+ {"name": "some name"},
+ {"name": "some other name"},
+ {"name": "yet another name"},
+ ]
+ )
.. seealso::
from sqlalchemy.types import String
from sqlalchemy import func, Table, Column, MetaData
+
class LowerString(String):
def bind_expression(self, bindvalue):
return func.lower(bindvalue)
def column_expression(self, col):
return func.lower(col)
+
metadata = MetaData()
- test_table = Table(
- 'test_table',
- metadata,
- Column('data', LowerString)
- )
+ test_table = Table("test_table", metadata, Column("data", LowerString))
Above, the ``LowerString`` type defines a SQL expression that will be emitted
whenever the ``test_table.c.data`` column is rendered in the columns
clause of a SELECT statement::
- >>> print(select([test_table]).where(test_table.c.data == 'HI'))
+ >>> print(select([test_table]).where(test_table.c.data == "HI"))
SELECT lower(test_table.data) AS data
FROM test_table
WHERE test_table.data = lower(:data_1)
signatures = relationship("Signature", lazy=False)
+
class Signature(Base):
__tablename__ = "signature"
id = Column(Integer, primary_key=True)
sig_count = column_property(
- select([func.count('*')]).\
- where(SnortEvent.signature == id).
- correlate_except(SnortEvent)
- )
+ select([func.count("*")])
+ .where(SnortEvent.signature == id)
+ .correlate_except(SnortEvent)
+ )
.. seealso::
from sqlalchemy.dialects.postgresql import HSTORE
- data = Table('data_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('hstore_data', HSTORE)
- )
-
- engine.execute(
- select([data.c.hstore_data['some_key']])
- ).scalar()
+ data = Table(
+ "data_table",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("hstore_data", HSTORE),
+ )
- engine.execute(
- select([data.c.hstore_data.matrix()])
- ).scalar()
+ engine.execute(select([data.c.hstore_data["some_key"]])).scalar()
+ engine.execute(select([data.c.hstore_data.matrix()])).scalar()
.. seealso::
The type also introduces new operators, using the new type-specific
operator framework. New operations include indexed access::
- result = conn.execute(
- select([mytable.c.arraycol[2]])
- )
+ result = conn.execute(select([mytable.c.arraycol[2]]))
slice access in SELECT::
- result = conn.execute(
- select([mytable.c.arraycol[2:4]])
- )
+ result = conn.execute(select([mytable.c.arraycol[2:4]]))
slice updates in UPDATE::
- conn.execute(
- mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})
- )
+ conn.execute(mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}))
freestanding array literals::
>>> from sqlalchemy.dialects import postgresql
- >>> conn.scalar(
- ... select([
- ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
- ... ])
- ... )
+ >>> conn.scalar(select([postgresql.array([1, 2]) + postgresql.array([3, 4, 5])]))
[1, 2, 3, 4, 5]
array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal::
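    # a sketch; mytable.c.arraycol is the ARRAY column assumed from the examples above
    select([mytable.c.arraycol + [4, 5, 6]])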
::
- Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True))
- Column('sometimestamp', sqlite.DATETIME(
- storage_format=(
- "%(year)04d%(month)02d%(day)02d"
- "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
- ),
- regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})"
- )
- )
- Column('somedate', sqlite.DATE(
- storage_format="%(month)02d/%(day)02d/%(year)04d",
- regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
- )
- )
+ Column("sometimestamp", sqlite.DATETIME(truncate_microseconds=True))
+ Column(
+ "sometimestamp",
+ sqlite.DATETIME(
+ storage_format=(
+ "%(year)04d%(month)02d%(day)02d"
+ "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
+ ),
+ regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})",
+ ),
+ )
+ Column(
+ "somedate",
+ sqlite.DATE(
+ storage_format="%(month)02d/%(day)02d/%(year)04d",
+ regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
+ ),
+ )
Huge thanks to Nate Dub for sprinting on this at Pycon 2012.
on all :class:`.String` types and will render on any backend, including
when features such as :meth:`_schema.MetaData.create_all` and :func:`.cast` are used::
- >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))])
+ >>> stmt = select([cast(sometable.c.somechar, String(20, collation="utf8"))])
>>> print(stmt)
SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1
FROM sometable
Base = declarative_base()
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(64))
+
class UserKeyword(Base):
- __tablename__ = 'user_keyword'
- user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
- keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True)
+ __tablename__ = "user_keyword"
+ user_id = Column(Integer, ForeignKey("user.id"), primary_key=True)
+ keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True)
- user = relationship(User,
- backref=backref("user_keywords",
- cascade="all, delete-orphan")
- )
+ user = relationship(
+ User, backref=backref("user_keywords", cascade="all, delete-orphan")
+ )
- keyword = relationship("Keyword",
- backref=backref("user_keywords",
- cascade="all, delete-orphan")
- )
+ keyword = relationship(
+ "Keyword", backref=backref("user_keywords", cascade="all, delete-orphan")
+ )
# uncomment this to enable the old behavior
# __mapper_args__ = {"legacy_is_orphan": True}
+
class Keyword(Base):
- __tablename__ = 'keyword'
+ __tablename__ = "keyword"
id = Column(Integer, primary_key=True)
- keyword = Column('keyword', String(64))
+ keyword = Column("keyword", String(64))
+
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
session.commit()
-
:ticket:`2655`
The after_attach event fires after the item is associated with the Session instead of before; before_attach added
@event.listens_for(Session, "before_attach")
def before_attach(session, instance):
- instance.some_necessary_attribute = session.query(Widget).\
- filter_by(instance.widget_name).\
- first()
+ instance.some_necessary_attribute = (
+ session.query(Widget).filter_by(instance.widget_name).first()
+ )
:ticket:`2464`
::
- subq = session.query(Entity.value).\
- filter(Entity.id==Parent.entity_id).\
- correlate(Parent).\
- as_scalar()
- session.query(Parent).filter(subq=="some value")
+ subq = (
+ session.query(Entity.value)
+ .filter(Entity.id == Parent.entity_id)
+ .correlate(Parent)
+ .as_scalar()
+ )
+ session.query(Parent).filter(subq == "some value")
This was the opposite behavior of a plain ``select()``
construct which would assume auto-correlation by default.
::
- subq = session.query(Entity.value).\
- filter(Entity.id==Parent.entity_id).\
- as_scalar()
- session.query(Parent).filter(subq=="some value")
+ subq = session.query(Entity.value).filter(Entity.id == Parent.entity_id).as_scalar()
+ session.query(Parent).filter(subq == "some value")
like in ``select()``, correlation can be disabled by calling
``query.correlate(None)`` or manually set by passing an
from sqlalchemy.sql import table, column, select
- t1 = table('t1', column('x'))
- t2 = table('t2', column('y'))
+ t1 = table("t1", column("x"))
+ t2 = table("t2", column("y"))
s = select([t1, t2]).correlate(t1)
print(s)
::
- scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo')
- select([sometable]).where(sometable.c.id==scalar_subq)
+ scalar_subq = select([someothertable.c.id]).where(someothertable.c.data == "foo")
+ select([sometable]).where(sometable.c.id == scalar_subq)
SQL Server doesn't allow an equality comparison to a scalar
SELECT, that is, "x = (SELECT something)". The MSSQL dialect
::
# before 0.8
- table1 = Table('t1', metadata,
- Column('col1', Integer, key='column_one')
- )
+ table1 = Table("t1", metadata, Column("col1", Integer, key="column_one"))
s = select([table1])
- s.c.column_one # would be accessible like this
- s.c.col1 # would raise AttributeError
+ s.c.column_one # would be accessible like this
+ s.c.col1 # would raise AttributeError
s = select([table1]).apply_labels()
- s.c.table1_column_one # would raise AttributeError
- s.c.table1_col1 # would be accessible like this
+ s.c.table1_column_one # would raise AttributeError
+ s.c.table1_col1 # would be accessible like this
In 0.8, :attr:`_schema.Column.key` is honored in both cases:
::
# with 0.8
- table1 = Table('t1', metadata,
- Column('col1', Integer, key='column_one')
- )
+ table1 = Table("t1", metadata, Column("col1", Integer, key="column_one"))
s = select([table1])
- s.c.column_one # works
- s.c.col1 # AttributeError
+ s.c.column_one # works
+ s.c.col1 # AttributeError
s = select([table1]).apply_labels()
- s.c.table1_column_one # works
- s.c.table1_col1 # AttributeError
+ s.c.table1_column_one # works
+ s.c.table1_col1 # AttributeError
All other behavior regarding "name" and "key" is the same,
including that the rendered SQL will still use the form
::
- t1 = table('t1', column('x'))
- t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z"
+ t1 = table("t1", column("x"))
+ t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z"
:ticket:`2415`
::
>>> row = result.fetchone()
- >>> row['foo'] == row['FOO'] == row['Foo']
+ >>> row["foo"] == row["FOO"] == row["Foo"]
True
This was for the benefit of a few dialects which in the
type maintained by that composite, rather than being broken out into individual
columns. Using the mapping setup at :ref:`mapper_composite`::
- >>> session.query(Vertex.start, Vertex.end).\
- ... filter(Vertex.start == Point(3, 4)).all()
+ >>> session.query(Vertex.start, Vertex.end).filter(Vertex.start == Point(3, 4)).all()
[(Point(x=3, y=4), Point(x=5, y=6))]
This change is backwards-incompatible with code that expects the individual attribute
accessor::
- >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\
- ... filter(Vertex.start == Point(3, 4)).all()
+ >>> session.query(Vertex.start.clauses, Vertex.end.clauses).filter(
+ ... Vertex.start == Point(3, 4)
+ ... ).all()
[(3, 4, 5, 6)]
.. seealso::
select_stmt = select([User]).where(User.id == 7).alias()
- q = session.query(User).\
- join(select_stmt, User.id == select_stmt.c.id).\
- filter(User.name == 'ed')
+ q = (
+ session.query(User)
+ .join(select_stmt, User.id == select_stmt.c.id)
+ .filter(User.name == "ed")
+ )
The above statement predictably renders SQL like the following::
JOIN, the documentation would lead us to believe we could use
:meth:`_query.Query.select_from` to do so::
- q = session.query(User).\
- select_from(select_stmt).\
- join(User, User.id == select_stmt.c.id).\
- filter(User.name == 'ed')
+ q = (
+ session.query(User)
+ .select_from(select_stmt)
+ .join(User, User.id == select_stmt.c.id)
+ .filter(User.name == "ed")
+ )
However, in version 0.8 and earlier, the above use of :meth:`_query.Query.select_from`
would apply the ``select_stmt`` to **replace** the ``User`` entity, as it
select_stmt = select([User]).where(User.id == 7)
user_from_stmt = aliased(User, select_stmt.alias())
- q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed')
+ q = session.query(user_from_stmt).filter(user_from_stmt.name == "ed")
So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces
the SQL we expect::
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
a = relationship("A", backref=backref("bs", viewonly=True))
+
e = create_engine("sqlite://")
Base.metadata.create_all(e)
Consider this mapping::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(Integer, ForeignKey('b.id'), primary_key=True)
+ b_id = Column(Integer, ForeignKey("b.id"), primary_key=True)
b = relationship("B")
b_value = association_proxy("b", "value")
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
value = Column(String)
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b = relationship("B", uselist=False)
bname = association_proxy("b", "name")
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
name = Column(String)
+
a1 = A()
# this is how m2o's always have worked
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String)
+
e = create_engine("sqlite://", echo=True)
Base.metadata.create_all(e)
sess = Session(e)
- a1 = A(data='a1')
+ a1 = A(data="a1")
sess.add(a1)
sess.commit() # a1 is now expired
assert inspect(a1).attrs.data.history == (None, None, None)
# in 0.8, this would fail to load the unloaded state.
- assert attributes.get_history(a1, 'data') == ((), ['a1',], ())
+ assert attributes.get_history(a1, "data") == (
+ (),
+ [
+ "a1",
+ ],
+ (),
+ )
# load_history() is now equivalent to get_history() with
# passive=PASSIVE_OFF ^ INIT_OK
- assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ())
+ assert inspect(a1).attrs.data.load_history() == (
+ (),
+ [
+ "a1",
+ ],
+ (),
+ )
:ticket:`2787`
from sqlalchemy.dialects.mysql import INTEGER
d = Date().with_variant(
- DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"),
- "sqlite"
- )
+ DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite"
+ )
- i = Integer().with_variant(
- INTEGER(display_width=5),
- "mysql"
- )
+ i = Integer().with_variant(INTEGER(display_width=5), "mysql")
:meth:`.TypeEngine.with_variant` isn't new; it was added in SQLAlchemy
0.7.2. So code that is running on the 0.8 series can be corrected to use
Previously, an expression like the following::
- print((column('x') == 'somevalue').collate("en_EN"))
+ print((column("x") == "somevalue").collate("en_EN"))
would produce an expression like this::
:meth:`.ColumnOperators.collate` operator is being applied to the right-hand
column, as follows::
- print(column('x') == literal('somevalue').collate("en_EN"))
+ print(column("x") == literal("somevalue").collate("en_EN"))
In 0.8, this produces::
generated::
>>> # 0.8
- >>> print(column('x').collate('en_EN').desc())
+ >>> print(column("x").collate("en_EN").desc())
(x COLLATE en_EN) DESC
>>> # 0.9
- >>> print(column('x').collate('en_EN').desc())
+ >>> print(column("x").collate("en_EN").desc())
x COLLATE en_EN DESC
:ticket:`2879`
signs within the enumerated values::
>>> from sqlalchemy.dialects import postgresql
- >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum")
+ >>> type = postgresql.ENUM("one", "two", "three's", name="myenum")
>>> from sqlalchemy.dialects.postgresql import base
>>> print(base.CreateEnumType(type).compile(dialect=postgresql.dialect()))
CREATE TYPE myenum AS ENUM ('one','two','three''s')
"""listen for before_insert"""
# ...
+
event.remove(MyClass, "before_insert", my_before_insert)
In the example above, the ``propagate=True`` flag is set. This
links in the path be spelled out as class bound attributes, since the
:meth:`.PropComparator.of_type` method needs to be called::
- session.query(Company).\
- options(
- subqueryload_all(
- Company.employees.of_type(Engineer),
- Engineer.machines
- )
- )
+ session.query(Company).options(
+ subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines)
+ )
**New Way**
query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords"))
-
The dotted style can still be taken advantage of, particularly in the case
of skipping over several path elements::
# undefer all Address columns
query(User).options(defaultload(User.addresses).undefer("*"))
-
:ticket:`1418`
where it will be used to render an ``INSERT .. SELECT`` construct::
>>> from sqlalchemy.sql import table, column
- >>> t1 = table('t1', column('a'), column('b'))
- >>> t2 = table('t2', column('x'), column('y'))
- >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5)))
+ >>> t1 = table("t1", column("a"), column("b"))
+ >>> t2 = table("t2", column("x"), column("y"))
+ >>> print(t1.insert().from_select(["a", "b"], t2.select().where(t2.c.y == 5)))
INSERT INTO t1 (a, b) SELECT t2.x, t2.y
FROM t2
WHERE t2.y = :y_1
and :class:`_query.Query` objects::
s = Session()
- q = s.query(User.id, User.name).filter_by(name='ed')
+ q = s.query(User.id, User.name).filter_by(name="ed")
ins = insert(Address).from_select((Address.id, Address.email_address), q)
rendering::
from sqlalchemy.dialects.mysql import DOUBLE
import decimal
- data = Table('data', metadata,
- Column('double_value',
- mysql.DOUBLE(decimal_return_scale=12, asdecimal=True))
+ data = Table(
+ "data",
+ metadata,
+ Column("double_value", mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)),
)
conn.execute(
# much precision for DOUBLE
assert result == decimal.Decimal("45.768392065789")
-
:ticket:`2867`
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", backref="a")
print("A.bs validator")
return item
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
@validates("a", include_backrefs=False)
def validate_a(self, key, item):
print("B.a validator")
return item
+
a1 = A()
a1.bs.append(B()) # prints only "A.bs validator"
-
:ticket:`1535`
employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True)
- session.query(Company).join(
- Company.employees.of_type(employee_alias)
- ).filter(
- or_(
- Engineer.primary_language == 'python',
- Manager.manager_name == 'dilbert'
- )
- )
+ session.query(Company).join(Company.employees.of_type(employee_alias)).filter(
+ or_(Engineer.primary_language == "python", Manager.manager_name == "dilbert")
+ )
Generates (everywhere except SQLite)::
Normally, a joined eager load chain like the following::
- query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True))
+ query(User).options(
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)
+ )
Would not produce an inner join; because of the LEFT OUTER JOIN from user->order,
joined eager loading could not use an INNER join from order->items without changing
Since we missed the boat on that, to avoid further regressions we've added the above
functionality by specifying the string ``"nested"`` to :paramref:`_orm.joinedload.innerjoin`::
- query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested"))
+ query(User).options(
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")
+ )
This feature is new in 0.9.4.
previous collection::
class Parent(Base):
- __tablename__ = 'parent'
+ __tablename__ = "parent"
id = Column(Integer, primary_key=True)
children = relationship("Child", backref="parent")
+
class Child(Base):
- __tablename__ = 'child'
+ __tablename__ = "child"
id = Column(Integer, primary_key=True)
- parent_id = Column(ForeignKey('parent.id'))
+ parent_id = Column(ForeignKey("parent.id"))
+
p1 = Parent()
p2 = Parent()
from sqlalchemy import Table, Boolean, Integer, Column, MetaData
- t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer))
+ t1 = Table("t", MetaData(), Column("x", Boolean()), Column("y", Integer))
A select construct will now render the boolean column as a binary expression
on backends that don't feature ``true``/``false`` constant behavior::
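    >>> # a sketch of the expected rendering on MySQL; output approximate
    >>> from sqlalchemy.dialects import mysql
    >>> print(select([t1]).where(t1.c.x).compile(dialect=mysql.dialect()))
    SELECT t.x, t.y FROM t WHERE t.x = 1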
"short circuit" behavior, that is truncating a rendered expression, when a
:func:`.true` or :func:`.false` constant is present::
- >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(
- ... dialect=postgresql.dialect()))
+ >>> print(
+ ... select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=postgresql.dialect())
+ ... )
SELECT t.x, t.y FROM t WHERE false
:func:`.true` can be used as the base to build up an expression::
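    >>> # a sketch: and_() short-circuits the true() constant away
    >>> from sqlalchemy import true
    >>> expr = true()
    >>> expr = expr & (t1.c.y > 5)
    >>> print(expr)
    t.y > :y_1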
The boolean constants :func:`.true` and :func:`.false` themselves render as
``0 = 1`` and ``1 = 1`` for a backend with no boolean constants::
- >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(
- ... dialect=mysql.dialect()))
+ >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=mysql.dialect()))
SELECT t.x, t.y FROM t WHERE 0 = 1
Interpretation of ``None``, while not particularly valid SQL, is at least
from sqlalchemy.sql import table, column, select, func
- t = table('t', column('c1'), column('c2'))
+ t = table("t", column("c1"), column("c2"))
expr = (func.foo(t.c.c1) + t.c.c2).label("expr")
stmt = select([expr]).order_by(expr)
an ``__lt__()`` method has been added::
users.insert().execute(
- dict(user_id=1, user_name='foo'),
- dict(user_id=2, user_name='bar'),
- dict(user_id=3, user_name='def'),
- )
+ dict(user_id=1, user_name="foo"),
+ dict(user_id=2, user_name="bar"),
+ dict(user_id=3, user_name="def"),
+ )
rows = users.select().order_by(users.c.user_name).execute().fetchall()
- eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
+ eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")])
- eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
+ eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")])
:ticket:`2848`
the statement is executed, which we can see by examining the ``binds`` dictionary::
>>> compiled = stmt.compile()
- >>> compiled.binds['some_col'].type
+ >>> compiled.binds["some_col"].type
String
The feature allows custom types to take their expected effect within INSERT/UPDATE
>>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey
>>> metadata = MetaData()
- >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id')))
+ >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id")))
>>> t2.c.t1id.type
NullType()
- >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True))
+ >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True))
>>> t2.c.t1id.type
Integer()
>>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint
>>> metadata = MetaData()
- >>> t2 = Table('t2', metadata,
- ... Column('t1a'), Column('t1b'),
- ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b']))
+ >>> t2 = Table(
+ ... "t2",
+ ... metadata,
+ ... Column("t1a"),
+ ... Column("t1b"),
+ ... ForeignKeyConstraint(["t1a", "t1b"], ["t1.a", "t1.b"]),
+ ... )
>>> t2.c.t1a.type
NullType()
>>> t2.c.t1b.type
NullType()
- >>> t1 = Table('t1', metadata,
- ... Column('a', Integer, primary_key=True),
- ... Column('b', Integer, primary_key=True))
+ >>> t1 = Table(
+ ... "t1",
+ ... metadata,
+ ... Column("a", Integer, primary_key=True),
+ ... Column("b", Integer, primary_key=True),
+ ... )
>>> t2.c.t1a.type
Integer()
>>> t2.c.t1b.type
>>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey
>>> metadata = MetaData()
- >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id')))
- >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id')))
+ >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id")))
+ >>> t3 = Table("t3", metadata, Column("t2t1id", ForeignKey("t2.t1id")))
>>> t2.c.t1id.type
NullType()
>>> t3.c.t2t1id.type
NullType()
- >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True))
+ >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True))
>>> t2.c.t1id.type
Integer()
>>> t3.c.t2t1id.type
bakery = baked.bakery()
+
def search_for_user(session, username, email=None):
baked_query = bakery(lambda session: session.query(User))
- baked_query += lambda q: q.filter(User.name == bindparam('username'))
+ baked_query += lambda q: q.filter(User.name == bindparam("username"))
baked_query += lambda q: q.order_by(User.id)
if email:
- baked_query += lambda q: q.filter(User.email == bindparam('email'))
+ baked_query += lambda q: q.filter(User.email == bindparam("email"))
result = baked_query(session).params(username=username, email=email).all()
@declared_attr
def foobar_prop(cls):
- return column_property('foobar: ' + cls.foobar)
+ return column_property("foobar: " + cls.foobar)
+
class SomeClass(HasFooBar, Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id = Column(Integer, primary_key=True)
Above, ``SomeClass.foobar_prop`` will be invoked against ``SomeClass``,
@declared_attr
def foobar_prop(cls):
- return column_property('foobar: ' + cls.foobar)
+ return column_property("foobar: " + cls.foobar)
+
class SomeClass(HasFooBar, Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id = Column(Integer, primary_key=True)
Previously, ``SomeClass`` would be mapped with one particular copy of
@declared_attr.cascading
def id(cls):
if has_inherited_table(cls):
- return Column(ForeignKey('myclass.id'), primary_key=True)
+ return Column(ForeignKey("myclass.id"), primary_key=True)
else:
return Column(Integer, primary_key=True)
+
class MyClass(HasIdMixin, Base):
- __tablename__ = 'myclass'
+ __tablename__ = "myclass"
# ...
+
class MySubClass(MyClass):
- ""
+ """ """
+
# ...
.. seealso::
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
- from sqlalchemy.ext.declarative import (declarative_base, declared_attr,
- AbstractConcreteBase)
+ from sqlalchemy.ext.declarative import (
+ declarative_base,
+ declared_attr,
+ AbstractConcreteBase,
+ )
Base = declarative_base()
+
class Something(Base):
- __tablename__ = u'something'
+ __tablename__ = "something"
id = Column(Integer, primary_key=True)
class Concrete(Abstract):
- __tablename__ = u'cca'
- __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True}
-
+ __tablename__ = "cca"
+ __mapper_args__ = {"polymorphic_identity": "cca", "concrete": True}
The above mapping will set up a table ``cca`` with both an ``id`` and
a ``something_id`` column, and ``Concrete`` will also have a relationship
Base = declarative_base()
+
class Foo(Base):
__table__ = Table(
- 'foo', Base.metadata,
- Column('id', Integer, primary_key=True),
- Column('a', Integer(), nullable=False),
- Column('b', Integer(), nullable=False),
- Column('c', Integer(), nullable=False),
+ "foo",
+ Base.metadata,
+ Column("id", Integer, primary_key=True),
+ Column("a", Integer(), nullable=False),
+ Column("b", Integer(), nullable=False),
+ Column("c", Integer(), nullable=False),
)
- engine = create_engine(
- 'mysql+mysqldb://scott:tiger@localhost/test', echo=True)
+
+ engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True)
sess = Session(engine)
return self.value + 5
- inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar'
+ inspect(SomeObject).all_orm_descriptors.some_prop.info["foo"] = "bar"
It is also available as a constructor argument for all :class:`.SchemaItem`
objects (e.g. :class:`_schema.ForeignKey`, :class:`.UniqueConstraint` etc.) as well
Given a mapping like the following::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
- A.b = column_property(
- select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)
- )
+ A.b = column_property(select([func.max(B.id)]).where(B.a_id == A.id).correlate(A))
A simple scenario that included "A.b" twice would fail to render
correctly::
to order by label, for example if the mapping were "polymorphic"::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
type = Column(String)
- __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'}
+ __mapper_args__ = {"polymorphic_on": type, "with_polymorphic": "*"}
The order_by would fail to use the label, as it would be anonymized due
to the polymorphic loading::
this is used to allow a bound parameter to be passed, which can be substituted
with a value later::
- sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset'))
+ sel = select([table]).limit(bindparam("mylimit")).offset(bindparam("myoffset"))
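The parameter values can then be supplied at execution time (a usage
sketch, assuming a ``conn`` connection object)::

    conn.execute(sel, mylimit=10, myoffset=5)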
Dialects which don't support non-integer LIMIT or OFFSET expressions may continue
to not support this behavior; third party dialects may also need modification
The ``%(column_0_name)s`` will derive from the first column found in the
expression of a :class:`.CheckConstraint`::
- metadata = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- foo = Table('foo', metadata,
- Column('value', Integer),
- )
+ foo = Table("foo", metadata, Column("value", Integer))
CheckConstraint(foo.c.value > 5)
m = MetaData()
- t = Table('t', m,
- Column('a', Integer),
- Column('b', Integer)
- )
+ t = Table("t", m, Column("a", Integer), Column("b", Integer))
uq = UniqueConstraint(t.c.a, t.c.b) # will auto-attach to Table
m = MetaData()
- a = Column('a', Integer)
- b = Column('b', Integer)
+ a = Column("a", Integer)
+ b = Column("b", Integer)
uq = UniqueConstraint(a, b)
- t = Table('t', m, a, b)
+ t = Table("t", m, a, b)
assert uq in t.constraints # constraint auto-attached
m = MetaData()
- a = Column('a', Integer)
- b = Column('b', Integer)
+ a = Column("a", Integer)
+ b = Column("b", Integer)
- uq = UniqueConstraint(a, 'b')
+ uq = UniqueConstraint(a, "b")
- t = Table('t', m, a, b)
+ t = Table("t", m, a, b)
# constraint *not* auto-attached, as we do not have tracking
# to locate when a name 'b' becomes available on the table
m = MetaData()
- a = Column('a', Integer)
- b = Column('b', Integer)
+ a = Column("a", Integer)
+ b = Column("b", Integer)
- t = Table('t', m, a, b)
+ t = Table("t", m, a, b)
- uq = UniqueConstraint(a, 'b')
+ uq = UniqueConstraint(a, "b")
# constraint auto-attached normally as in older versions
assert uq in t.constraints
-
:ticket:`3341`
:ticket:`3411`
m = MetaData()
t = Table(
- 't', m,
- Column('x', Integer),
- Column('y', Integer, default=func.somefunction()))
+ "t", m, Column("x", Integer), Column("y", Integer, default=func.somefunction())
+ )
stmt = select([t.c.x])
- print(t.insert().from_select(['x'], stmt))
+ print(t.insert().from_select(["x"], stmt))
Will render::
metadata = MetaData()
- tbl = Table("derp", metadata,
- Column("arr", ARRAY(Text),
- server_default=array(["foo", "bar", "baz"])),
+ tbl = Table(
+ "derp",
+ metadata,
+ Column("arr", ARRAY(Text), server_default=array(["foo", "bar", "baz"])),
)
print(CreateTable(tbl).compile(dialect=postgresql.dialect()))
warnings.filterwarnings("once")
for i in range(1000):
- e.execute(select([cast(
- ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)]))
+ e.execute(
+ select([cast(("foo_%d" % random.randint(0, 1000000)).encode("ascii"), Unicode)])
+ )
The format of the warning here is::
The string names are now resolved as attribute names in earnest::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
- name = Column('user_name', String(50))
+ name = Column("user_name", String(50))
Above, the column ``user_name`` is mapped as ``name``. Previously,
a call to :meth:`_query.Query.update` that was passed strings would have to
have been called as follows::
- session.query(User).update({'user_name': 'moonbeam'})
+ session.query(User).update({"user_name": "moonbeam"})
The given string is now resolved against the entity::
- session.query(User).update({'name': 'moonbeam'})
+ session.query(User).update({"name": "moonbeam"})
It is typically preferable to use the attribute directly, to avoid any
ambiguity::
- session.query(User).update({User.name: 'moonbeam'})
+ session.query(User).update({User.name: "moonbeam"})
The change also indicates that synonyms and hybrid attributes can be referred
to by string name as well::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
- name = Column('user_name', String(50))
+ name = Column("user_name", String(50))
@hybrid_property
def fullname(self):
return self.name
- session.query(User).update({'fullname': 'moonbeam'})
+
+ session.query(User).update({"fullname": "moonbeam"})
:ticket:`3228`
Given a mapping::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
a = relationship("A")
Given ``A``, with primary key of 7, but which we changed to be 10
Given a mapping::
class A(Base):
- __tablename__ = 'table_a'
+ __tablename__ = "table_a"
id = Column(Integer, primary_key=True)
+
class B(Base):
- __tablename__ = 'table_b'
+ __tablename__ = "table_b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('table_a.id'))
+ a_id = Column(ForeignKey("table_a.id"))
a = relationship(A)
In 1.0, the relationship-bound attribute takes precedence over the FK-bound
session.flush()
b1 = B()
- b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0
+ b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0
b2 = B()
b2.a = None # we expect a_id to be None; takes precedence only in 1.0
When this error is raised, the :func:`.lazyload` option can be sent with
an asterisk::
- q = sess.query(Object).options(lazyload('*')).yield_per(100)
+ q = sess.query(Object).options(lazyload("*")).yield_per(100)
or use :meth:`_query.Query.enable_eagerloads`::
The :func:`.lazyload` option has the advantage that additional many-to-one
joined loader options can still be used::
- q = sess.query(Object).options(
- lazyload('*'), joinedload("some_manytoone")).yield_per(100)
+ q = (
+ sess.query(Object)
+ .options(lazyload("*"), joinedload("some_manytoone"))
+ .yield_per(100)
+ )
.. _bug_3233:
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
A query that joins to ``A.bs`` twice::
The query deduplicates the redundant ``A.bs`` because it is attempting
to support a case like the following::
- s.query(A).join(A.bs).\
- filter(B.foo == 'bar').\
- reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat')
+ s.query(A).join(A.bs).filter(B.foo == "bar").reset_joinpoint().join(A.bs, B.cs).filter(
+ C.bar == "bat"
+ )
That is, the ``A.bs`` is part of a "path". As part of :ticket:`3367`,
arriving at the same endpoint twice without it being part of a
Base = declarative_base()
+
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
type = Column(String)
- __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'}
+ __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"}
class ASub1(A):
- __mapper_args__ = {'polymorphic_identity': 'asub1'}
+ __mapper_args__ = {"polymorphic_identity": "asub1"}
class ASub2(A):
- __mapper_args__ = {'polymorphic_identity': 'asub2'}
+ __mapper_args__ = {"polymorphic_identity": "asub2"}
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(Integer, ForeignKey("a.id"))
- a = relationship("A", primaryjoin="B.a_id == A.id", backref='b')
+ a = relationship("A", primaryjoin="B.a_id == A.id", backref="b")
+
s = Session()
from sqlalchemy.orm import Bundle
+
class DictBundle(Bundle):
def create_row_processor(self, query, procs, labels):
"""Override create_row_processor to return values as dictionaries"""
+
def proc(row, result):
- return dict(
- zip(labels, (proc(row, result) for proc in procs))
- )
+ return dict(zip(labels, (proc(row, result) for proc in procs)))
+
return proc
The unused ``result`` member is now removed::
from sqlalchemy.orm import Bundle
+
class DictBundle(Bundle):
def create_row_processor(self, query, procs, labels):
"""Override create_row_processor to return values as dictionaries"""
+
def proc(row):
- return dict(
- zip(labels, (proc(row) for proc in procs))
- )
+ return dict(zip(labels, (proc(row) for proc in procs)))
+
return proc
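In either form, the bundle is used in a query the same way; a sketch, assuming
a hypothetical mapped ``User`` class with ``data1`` and ``data2`` columns::

    bn = DictBundle("mybundle", User.data1, User.data2)

    for row in session.query(bn).filter(bn.c.data1 == "d1"):
        print(row.mybundle["data1"], row.mybundle["data2"])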
.. seealso::
when using ``innerjoin=True``::
query(User).options(
- joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True))
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)
+ )
With the new default, this will render the FROM clause in the form::
To get the older behavior, use ``innerjoin="unnested"``::
query(User).options(
- joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested"))
+ joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested")
+ )
This will avoid right-nested joins and chain the joins together using all
OUTER joins despite the innerjoin directive::
Given a joined eager load like the following::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b = relationship("B", uselist=False)
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
+
s = Session()
print(s.query(A).options(joinedload(A.b)).limit(5))
Given a single-table inheritance mapping, such as::
class Widget(Base):
- __table__ = 'widget_table'
+ __tablename__ = "widget_table"
+
class FooWidget(Widget):
pass
mapping as::
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
id = Column(Integer, primary_key=True)
type = Column(String)
- related_id = Column(ForeignKey('related.id'))
+ related_id = Column(ForeignKey("related.id"))
related = relationship("Related", backref="widget")
- __mapper_args__ = {'polymorphic_on': type}
+ __mapper_args__ = {"polymorphic_on": type}
class FooWidget(Widget):
- __mapper_args__ = {'polymorphic_identity': 'foo'}
+ __mapper_args__ = {"polymorphic_identity": "foo"}
class Related(Base):
- __tablename__ = 'related'
+ __tablename__ = "related"
id = Column(Integer, primary_key=True)
It's been the behavior for quite some time that a JOIN on the relationship
# This is a normal Core expression with a string argument -
# we aren't talking about this!!
- stmt = select([sometable]).where(sometable.c.somecolumn == 'value')
+ stmt = select([sometable]).where(sometable.c.somecolumn == "value")
The Core tutorial has long featured an example of the use of this technique,
using a :func:`_expression.select` construct where virtually all components of it
should be used::
import warnings
- warnings.simplefilter("error") # all warnings raise an exception
+
+ warnings.simplefilter("error") # all warnings raise an exception
Given the above warnings, our statement works just fine, but
to get rid of the warnings we would rewrite our statement as follows::
from sqlalchemy import select, text
- stmt = select([
- text("a"),
- text("b")
- ]).where(text("a = b")).select_from(text("sometable"))
+
+ stmt = (
+ select([text("a"), text("b")]).where(text("a = b")).select_from(text("sometable"))
+ )
and as the warnings suggest, we can give our statement more specificity
about the text if we use :func:`_expression.column` and :func:`.table`::
from sqlalchemy import select, text, column, table
- stmt = select([column("a"), column("b")]).\
- where(text("a = b")).select_from(table("sometable"))
+ stmt = (
+ select([column("a"), column("b")])
+ .where(text("a = b"))
+ .select_from(table("sometable"))
+ )
Note also that :func:`.table` and :func:`_expression.column` can now
be imported from "sqlalchemy" without the "sql" part.
:func:`_expression.select` or :class:`_query.Query` that refers to some column name or named
label, we might want to GROUP BY and/or ORDER BY known columns or labels::
- stmt = select([
- user.c.name,
- func.count(user.c.id).label("id_count")
- ]).group_by("name").order_by("id_count")
+ stmt = (
+ select([user.c.name, func.count(user.c.id).label("id_count")])
+ .group_by("name")
+ .order_by("id_count")
+ )
In the above statement we expect to see "ORDER BY id_count", as opposed to a
re-statement of the function. The string argument given is actively
However, if we refer to a name that cannot be located, then we get
the warning again, as below::
- stmt = select([
- user.c.name,
- func.count(user.c.id).label("id_count")
- ]).order_by("some_label")
+ stmt = select([user.c.name, func.count(user.c.id).label("id_count")]).order_by(
+ "some_label"
+ )
The output does what we say, but again it warns us::
counter = itertools.count(1)
t = Table(
- 'my_table', metadata,
- Column('id', Integer, default=lambda: next(counter)),
- Column('data', String)
+ "my_table",
+ metadata,
+ Column("id", Integer, default=lambda: next(counter)),
+ Column("data", String),
)
- conn.execute(t.insert().values([
- {"data": "d1"},
- {"data": "d2"},
- {"data": "d3"},
- ]))
+ conn.execute(
+ t.insert().values(
+ [
+ {"data": "d1"},
+ {"data": "d2"},
+ {"data": "d3"},
+ ]
+ )
+ )
The above example will invoke ``next(counter)`` for each row individually
as would be expected::
an exception is raised::
t = Table(
- 'my_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', String, server_default='some default')
+ "my_table",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("data", String, server_default="some default"),
)
- conn.execute(t.insert().values([
- {"data": "d1"},
- {"data": "d2"},
- {},
- ]))
+ conn.execute(
+ t.insert().values(
+ [
+ {"data": "d1"},
+ {"data": "d2"},
+ {},
+ ]
+ )
+ )
will raise::
A :class:`_schema.Table` can be set up for reflection by passing
:paramref:`_schema.Table.autoload_with` alone::
- my_table = Table('my_table', metadata, autoload_with=some_engine)
+ my_table = Table("my_table", metadata, autoload_with=some_engine)
:ticket:`3027`
associated with a :class:`_schema.MetaData` object will be created *and* dropped
corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`::
- table = Table('sometable', metadata,
- Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
+ table = Table(
+ "sometable", metadata, Column("some_enum", ENUM("a", "b", "c", name="myenum"))
)
table.create(engine) # will emit CREATE TYPE and CREATE TABLE
the exception of :meth:`_schema.Table.create` called with the ``checkfirst=True``
flag::
- my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
+ my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata)
- table = Table('sometable', metadata,
- Column('some_enum', my_enum)
- )
+ table = Table("sometable", metadata, Column("some_enum", my_enum))
# will fail: ENUM 'my_enum' does not exist
table.create(engine)
table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE
- metadata.drop_all(engine) # will emit DROP TYPE
-
- metadata.create_all(engine) # will emit CREATE TYPE
+ metadata.drop_all(engine) # will emit DROP TYPE
+ metadata.create_all(engine) # will emit CREATE TYPE
:ticket:`3319`
metadata = MetaData()
user_tmp = Table(
- "user_tmp", metadata,
+ "user_tmp",
+ metadata,
Column("id", INT, primary_key=True),
- Column('name', VARCHAR(50)),
- prefixes=['TEMPORARY']
+ Column("name", VARCHAR(50)),
+ prefixes=["TEMPORARY"],
)
- e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug')
+ e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug")
with e.begin() as conn:
user_tmp.create(conn, checkfirst=True)
metadata = MetaData()
user_tmp = Table(
- "user_tmp", metadata,
+ "user_tmp",
+ metadata,
Column("id", INT, primary_key=True),
- Column('name', VARCHAR(50)),
- prefixes=['TEMPORARY']
+ Column("name", VARCHAR(50)),
+ prefixes=["TEMPORARY"],
)
- e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug')
+ e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug")
with e.begin() as conn:
user_tmp.create(conn, checkfirst=True)
m2 = MetaData()
user = Table(
- "user_tmp", m2,
+ "user_tmp",
+ m2,
Column("id", INT, primary_key=True),
- Column('name', VARCHAR(50)),
+ Column("name", VARCHAR(50)),
)
# in 0.9, *will create* the new table, overwriting the old one.
on MySQL::
>>> connection.execute(
- ... select([
- ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'),
- ... matchtable.c.title.match('Dive Python').label('python'),
- ... matchtable.c.title
- ... ]).order_by(matchtable.c.id)
+ ... select(
+ ... [
+ ... matchtable.c.title.match("Agile Ruby Programming").label("ruby"),
+ ... matchtable.c.title.match("Dive Python").label("python"),
+ ... matchtable.c.title,
+ ... ]
+ ... ).order_by(matchtable.c.id)
... )
[
(2.0, 0.0, 'Agile Web Development with Ruby On Rails'),
with an explicit hostname, now requires a driver name - SQLAlchemy will no
longer attempt to guess a default::
- engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
+ engine = create_engine(
+ "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0"
+ )
SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on
Windows, and SQLAlchemy cannot be tasked with guessing the best driver
CTE support has been fixed up for Oracle, and there is also a new feature
:meth:`_expression.CTE.with_suffixes` that can assist with Oracle's special directives::
- included_parts = select([
- part.c.sub_part, part.c.part, part.c.quantity
- ]).where(part.c.part == "p1").\
- cte(name="included_parts", recursive=True).\
- suffix_with(
+ included_parts = (
+ select([part.c.sub_part, part.c.part, part.c.quantity])
+ .where(part.c.part == "p1")
+ .cte(name="included_parts", recursive=True)
+ .suffix_with(
"search depth first by part set ord1",
- "cycle part set y_cycle to 1 default 0", dialect='oracle')
+ "cycle part set y_cycle to 1 default 0",
+ dialect="oracle",
+ )
+ )
:ticket:`3220`
examples will return duplicate rows due to the joined eager load unless
explicit typing is applied::
- result = session.query(
- func.substr(A.some_thing, 0, 4), A
- ).options(joinedload(A.bs)).all()
+ result = (
+ session.query(func.substr(A.some_thing, 0, 4), A).options(joinedload(A.bs)).all()
+ )
- users = session.query(
- func.date(
- User.date_created, 'start of month'
- ).label('month'),
- User,
- ).options(joinedload(User.orders)).all()
+ users = (
+ session.query(
+ func.date(User.date_created, "start of month").label("month"),
+ User,
+ )
+ .options(joinedload(User.orders))
+ .all()
+ )
The above examples, in order to retain deduping, should be specified as::
- result = session.query(
- func.substr(A.some_thing, 0, 4, type_=String), A
- ).options(joinedload(A.bs)).all()
+ result = (
+ session.query(func.substr(A.some_thing, 0, 4, type_=String), A)
+ .options(joinedload(A.bs))
+ .all()
+ )
- users = session.query(
- func.date(
- User.date_created, 'start of month', type_=DateTime
- ).label('month'),
- User,
- ).options(joinedload(User.orders)).all()
+ users = (
+ session.query(
+ func.date(User.date_created, "start of month", type_=DateTime).label("month"),
+ User,
+ )
+ .options(joinedload(User.orders))
+ .all()
+ )
Additionally, the treatment of a so-called "unhashable" type is slightly
different than it has been in previous releases; internally we are using
>>> some_user = User()
>>> q = s.query(User).filter(User.name == some_user)
- ...
sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value
The exception is now immediate when the comparison is made between
or JSON field::
class Person(Base):
- __tablename__ = 'person'
+ __tablename__ = "person"
id = Column(Integer, primary_key=True)
data = Column(JSON)
- name = index_property('data', 'name')
+ name = index_property("data", "name")
Above, the ``name`` attribute will read/write the field ``"name"``
from the JSON column ``data``, after initializing it to an
empty dictionary::
- >>> person = Person(name='foobar')
+ >>> person = Person(name="foobar")
>>> person.name
foobar
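Assignment writes through to the underlying dictionary in the same way; a
sketch continuing the example above::

    >>> person.name = "updated"
    >>> person.data
    {'name': 'updated'}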
query is against a subquery expression such as an exists::
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
id = Column(Integer, primary_key=True)
type = Column(String)
data = Column(String)
- __mapper_args__ = {'polymorphic_on': type}
+ __mapper_args__ = {"polymorphic_on": type}
class FooWidget(Widget):
- __mapper_args__ = {'polymorphic_identity': 'foo'}
+ __mapper_args__ = {"polymorphic_identity": "foo"}
- q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists()
+
+ q = session.query(FooWidget).filter(FooWidget.data == "bar").exists()
session.query(q).all()
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
e = create_engine("sqlite://", echo=True)
Base.metadata.create_all(e)
class A(Base):
__tablename__ = "a"
- id = Column('id', Integer, primary_key=True)
+ id = Column("id", Integer, primary_key=True)
type = Column(String)
__mapper_args__ = {
- 'polymorphic_on': type,
- 'polymorphic_identity': 'a',
- 'passive_deletes': True
+ "polymorphic_on": type,
+ "polymorphic_identity": "a",
+ "passive_deletes": True,
}
class B(A):
- __tablename__ = 'b'
- b_table_id = Column('b_table_id', Integer, primary_key=True)
- bid = Column('bid', Integer, ForeignKey('a.id', ondelete="CASCADE"))
- data = Column('data', String)
+ __tablename__ = "b"
+ b_table_id = Column("b_table_id", Integer, primary_key=True)
+ bid = Column("bid", Integer, ForeignKey("a.id", ondelete="CASCADE"))
+ data = Column("data", String)
- __mapper_args__ = {
- 'polymorphic_identity': 'b'
- }
+ __mapper_args__ = {"polymorphic_identity": "b"}
With the above mapping, the :paramref:`.orm.mapper.passive_deletes` option
is configured on the base mapper; it takes effect for all non-base mappers
The following mapping has always been possible without issue::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b = relationship("B", foreign_keys="B.a_id", backref="a")
+
class A1(A):
- __tablename__ = 'a1'
+ __tablename__ = "a1"
id = Column(Integer, primary_key=True)
b = relationship("B", foreign_keys="B.a1_id", backref="a1")
- __mapper_args__ = {'concrete': True}
+ __mapper_args__ = {"concrete": True}
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- a1_id = Column(ForeignKey('a1.id'))
+ a_id = Column(ForeignKey("a.id"))
+ a1_id = Column(ForeignKey("a1.id"))
Above, even though class ``A`` and class ``A1`` have a relationship
named ``b``, no conflict warning or error occurs because class ``A1`` is
would occur::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
class A1(A):
- __tablename__ = 'a1'
+ __tablename__ = "a1"
id = Column(Integer, primary_key=True)
- __mapper_args__ = {'concrete': True}
+ __mapper_args__ = {"concrete": True}
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- a1_id = Column(ForeignKey('a1.id'))
+ a_id = Column(ForeignKey("a.id"))
+ a1_id = Column(ForeignKey("a1.id"))
a = relationship("A", backref="b")
a1 = relationship("A1", backref="b")
An example is as follows::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
class ASub(A):
- __tablename__ = 'a_sub'
- id = Column(Integer, ForeignKey('a.id'), primary_key=True)
+ __tablename__ = "a_sub"
+ id = Column(Integer, ForeignKey("a.id"), primary_key=True)
bs = relationship("B")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
-
+ a_id = Column(ForeignKey("a.id"))
This warning dates back to the 0.4 series in 2007 and is based on a version of
the unit of work code that has since been entirely rewritten. Currently, there
present in the original docstring::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
name = Column(String)
expression. That is, accessing ``A.some_name.info`` now returns the same
dictionary that you'd get from ``inspect(A).all_orm_descriptors['some_name'].info``::
- >>> A.some_name.info['foo'] = 'bar'
+ >>> A.some_name.info["foo"] = "bar"
>>> from sqlalchemy import inspect
- >>> inspect(A).all_orm_descriptors['some_name'].info
+ >>> inspect(A).all_orm_descriptors["some_name"].info
{'foo': 'bar'}
Note that this ``.info`` dictionary is **separate** from that of a mapped attribute
Given::
- u1 = User(id=7, name='x')
+ u1 = User(id=7, name="x")
u1.orders = [
- Order(description='o1', address=Address(id=1, email_address='a')),
- Order(description='o2', address=Address(id=1, email_address='b')),
- Order(description='o3', address=Address(id=1, email_address='c'))
+ Order(description="o1", address=Address(id=1, email_address="a")),
+ Order(description="o2", address=Address(id=1, email_address="b")),
+ Order(description="o3", address=Address(id=1, email_address="c")),
]
sess = Session()
deep use case that's hard to reproduce, but the general idea is as follows::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
- c_id = Column(ForeignKey('c.id'))
+ b_id = Column(ForeignKey("b.id"))
+ c_id = Column(ForeignKey("c.id"))
b = relationship("B")
c = relationship("C")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- c_id = Column(ForeignKey('c.id'))
+ c_id = Column(ForeignKey("c.id"))
c = relationship("C")
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
- d_id = Column(ForeignKey('d.id'))
+ d_id = Column(ForeignKey("d.id"))
d = relationship("D")
class D(Base):
- __tablename__ = 'd'
+ __tablename__ = "d"
id = Column(Integer, primary_key=True)
q = s.query(A)
q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d)
- q = q.options(contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d))
+ q = q.options(
+ contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d)
+ )
q = q.join(c_alias_2, A.c)
q = q.options(contains_eager(A.c, alias=c_alias_2))
>>> from sqlalchemy import table, column, select, literal, exists
>>> orders = table(
- ... 'orders',
- ... column('region'),
- ... column('amount'),
- ... column('product'),
- ... column('quantity')
+ ... "orders",
+ ... column("region"),
+ ... column("amount"),
+ ... column("product"),
+ ... column("quantity"),
... )
>>>
>>> upsert = (
... orders.update()
- ... .where(orders.c.region == 'Region1')
- ... .values(amount=1.0, product='Product1', quantity=1)
- ... .returning(*(orders.c._all_columns)).cte('upsert'))
+ ... .where(orders.c.region == "Region1")
+ ... .values(amount=1.0, product="Product1", quantity=1)
+ ... .returning(*(orders.c._all_columns))
+ ... .cte("upsert")
+ ... )
>>>
>>> insert = orders.insert().from_select(
... orders.c.keys(),
- ... select([
- ... literal('Region1'), literal(1.0),
- ... literal('Product1'), literal(1)
- ... ]).where(~exists(upsert.select()))
+ ... select([literal("Region1"), literal(1.0), literal("Product1"), literal(1)]).where(
+ ... ~exists(upsert.select())
+ ... ),
... )
>>>
>>> print(insert) # note formatting added for clarity
>>> from sqlalchemy import func
- >>> print(func.row_number().over(order_by='x', range_=(-5, 10)))
+ >>> print(func.row_number().over(order_by="x", range_=(-5, 10)))
row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND :param_2 FOLLOWING)
- >>> print(func.row_number().over(order_by='x', rows=(None, 0)))
+ >>> print(func.row_number().over(order_by="x", rows=(None, 0)))
row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
- >>> print(func.row_number().over(order_by='x', range_=(-2, None)))
+ >>> print(func.row_number().over(order_by="x", range_=(-2, None)))
row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING)
:paramref:`.expression.over.range_` and :paramref:`.expression.over.rows` are specified as
selectable, e.g. lateral correlation::
>>> from sqlalchemy import table, column, select, true
- >>> people = table('people', column('people_id'), column('age'), column('name'))
- >>> books = table('books', column('book_id'), column('owner_id'))
- >>> subq = select([books.c.book_id]).\
- ... where(books.c.owner_id == people.c.people_id).lateral("book_subq")
+ >>> people = table("people", column("people_id"), column("age"), column("name"))
+ >>> books = table("books", column("book_id"), column("owner_id"))
+ >>> subq = (
+ ... select([books.c.book_id])
+ ... .where(books.c.owner_id == people.c.people_id)
+ ... .lateral("book_subq")
+ ... )
>>> print(select([people]).select_from(people.join(subq, true())))
SELECT people.people_id, people.age, people.name
FROM people JOIN LATERAL (SELECT books.book_id AS book_id
from sqlalchemy import func
- selectable = people.tablesample(
- func.bernoulli(1),
- name='alias',
- seed=func.random())
+ selectable = people.tablesample(func.bernoulli(1), name="alias", seed=func.random())
stmt = select([selectable.c.people_id])
Assuming ``people`` with a column ``people_id``, the above
*composite* primary key; previously, a table definition such as::
Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True)
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True),
)
Would have "autoincrement" semantics applied to the ``'x'`` column, only
# old way
Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True, autoincrement=False),
- Column('y', Integer, primary_key=True, autoincrement=False)
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True, autoincrement=False),
+ Column("y", Integer, primary_key=True, autoincrement=False),
)
With the new behavior, the composite primary key will not have autoincrement
# column 'y' will be SERIAL/AUTO_INCREMENT/auto-generating
Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, autoincrement=True)
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, autoincrement=True),
)
In order to anticipate some potential backwards-incompatible scenarios,
have autoincrement set up; given a table such as::
Table(
- 'b', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True)
+ "b",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True),
)
An INSERT emitted with no values for this table will produce this warning::
value generator can be indicated using :class:`.FetchedValue`::
Table(
- 'b', metadata,
- Column('x', Integer, primary_key=True, server_default=FetchedValue()),
- Column('y', Integer, primary_key=True, server_default=FetchedValue())
+ "b",
+ metadata,
+ Column("x", Integer, primary_key=True, server_default=FetchedValue()),
+ Column("y", Integer, primary_key=True, server_default=FetchedValue()),
)
For the very unlikely case where a composite primary key is actually intended
specify the column with ``nullable=True``::
Table(
- 'b', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, nullable=True)
+ "b",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, nullable=True),
)
In a related change, the ``autoincrement`` flag may be set to True
:meth:`.ColumnOperators.isnot_distinct_from` allow the IS DISTINCT
FROM and IS NOT DISTINCT FROM sql operation::
- >>> print(column('x').is_distinct_from(None))
+ >>> print(column("x").is_distinct_from(None))
x IS DISTINCT FROM NULL
Handling is provided for NULL, True and False::
- >>> print(column('x').isnot_distinct_from(False))
+ >>> print(column("x").isnot_distinct_from(False))
x IS NOT DISTINCT FROM false
For SQLite, which doesn't have this operator, "IS" / "IS NOT" is rendered,
which on SQLite works for NULL unlike other backends::
>>> from sqlalchemy.dialects import sqlite
- >>> print(column('x').is_distinct_from(None).compile(dialect=sqlite.dialect()))
+ >>> print(column("x").is_distinct_from(None).compile(dialect=sqlite.dialect()))
x IS NOT NULL
.. _change_1957:
from sqlalchemy import text
- stmt = text("SELECT users.id, addresses.id, users.id, "
- "users.name, addresses.email_address AS email "
- "FROM users JOIN addresses ON users.id=addresses.user_id "
- "WHERE users.id = 1").columns(
- User.id,
- Address.id,
- Address.user_id,
- User.name,
- Address.email_address
- )
-
- query = session.query(User).from_statement(stmt).\
- options(contains_eager(User.addresses))
+
+ stmt = text(
+ "SELECT users.id, addresses.id, users.id, "
+ "users.name, addresses.email_address AS email "
+ "FROM users JOIN addresses ON users.id=addresses.user_id "
+ "WHERE users.id = 1"
+ ).columns(User.id, Address.id, Address.user_id, User.name, Address.email_address)
+
+ query = session.query(User).from_statement(stmt).options(contains_eager(User.addresses))
result = query.all()
Above, the textual SQL contains the column "id" three times, which would
to rely upon "positional" matching more fully for compiled SQL constructs
as well. Given a statement like the following::
- ua = users.alias('ua')
+ ua = users.alias("ua")
stmt = select([users.c.user_id, ua.c.user_id])
The above statement will compile to::
ua_id = row[ua.c.user_id]
# this still raises, however
- user_id = row['user_id']
+ user_id = row["user_id"]
Much less likely to get an "ambiguous column" error message
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
three = 3
- t = Table(
- 'data', MetaData(),
- Column('value', Enum(MyEnum))
- )
+ t = Table("data", MetaData(), Column("value", Enum(MyEnum)))
e = create_engine("sqlite://")
t.create(e)
>>> from sqlalchemy import Table, MetaData, Column, Enum, create_engine
>>> t = Table(
- ... 'data', MetaData(),
- ... Column('value', Enum("one", "two", "three", validate_strings=True))
+ ... "data",
+ ... MetaData(),
+ ... Column("value", Enum("one", "two", "three", validate_strings=True)),
... )
>>> e = create_engine("sqlite://")
>>> t.create(e)
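A string not among the declared values now fails when the statement is
executed; a sketch of the expected behavior (the exact message may differ)::

    >>> e.execute(t.insert(), {"value": "four"})
    Traceback (most recent call last):
    ...
    LookupError: "four" is not among the defined enum values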
>>> from sqlalchemy import create_engine
>>> import random
- >>> e = create_engine("sqlite://", echo='debug')
- >>> some_value = ''.join(chr(random.randint(52, 85)) for i in range(5000))
+ >>> e = create_engine("sqlite://", echo="debug")
+ >>> some_value = "".join(chr(random.randint(52, 85)) for i in range(5000))
>>> row = e.execute("select ?", [some_value]).first()
- ... (lines are wrapped for clarity) ...
+ ... # (lines are wrapped for clarity) ...
2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine select ?
2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine
('E6@?>9HPOJB<<BHR:@=TS:5ILU=;JLM<4?B9<S48PTNG9>:=TSTLA;9K;9FPM4M8M@;NM6GU
json_value = Column(JSON(none_as_null=False), default="some default")
+
# would insert "some default" instead of "'null'",
# now will insert "'null'"
obj = MyObject(json_value=None)
some_other_value = Column(String(50))
json_value = Column(JSON(none_as_null=False))
+
# would result in NULL for some_other_value,
# but json "'null'" for json_value. Now results in NULL for both
# (the json_value is omitted from the INSERT)
# would insert SQL NULL and/or trigger defaults,
# now inserts "'null'"
- session.bulk_insert_mappings(
- MyObject,
- [{"json_value": None}])
+ session.bulk_insert_mappings(MyObject, [{"json_value": None}])
The :class:`_types.JSON` type now implements the
:attr:`.TypeEngine.should_evaluate_none` flag,
PostgreSQL**; however, it can be used directly, supporting special array
use cases such as indexed access, as well as the ANY and ALL operators::
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer, dimensions=2))
- )
+ mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2)))
expr = mytable.c.data[5][6]
subq = select([mytable.c.value])
select([mytable]).where(12 > any_(subq))
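The ``all_`` function works symmetrically; a sketch reusing ``subq`` from
above::

    from sqlalchemy import all_

    # renders: 12 > ALL (SELECT mytable.value FROM mytable)
    select([mytable]).where(12 > all_(subq))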
-
:ticket:`3516`
.. _change_3132:
which is now available using :class:`_functions.array_agg`::
from sqlalchemy import func
+
stmt = select([func.array_agg(table.c.value)])
A PostgreSQL element for an aggregate ORDER BY is also added via
:class:`_postgresql.aggregate_order_by`::
from sqlalchemy.dialects.postgresql import aggregate_order_by
+
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
ensure the :class:`_postgresql.ARRAY` type::
from sqlalchemy.dialects.postgresql import array_agg
- stmt = select([array_agg(table.c.value).contains('foo')])
+ stmt = select([array_agg(table.c.value).contains("foo")])
Additionally, functions like ``percentile_cont()``, ``percentile_disc()``,
``rank()``, ``dense_rank()`` and others that require an ordering via
:meth:`.FunctionElement.within_group` modifier::
from sqlalchemy import func
- stmt = select([
- department.c.id,
- func.percentile_cont(0.5).within_group(
- department.c.salary.desc()
- )
- ])
+
+ stmt = select(
+ [
+ department.c.id,
+ func.percentile_cont(0.5).within_group(department.c.salary.desc()),
+ ]
+ )
The above statement would produce SQL similar to::
# old way
class MyEnum(TypeDecorator, SchemaType):
- impl = postgresql.ENUM('one', 'two', 'three', name='myenum')
+ impl = postgresql.ENUM("one", "two", "three", name="myenum")
def _set_table(self, table):
self.impl._set_table(table)
# new way
class MyEnum(TypeDecorator):
- impl = postgresql.ENUM('one', 'two', 'three', name='myenum')
-
+ impl = postgresql.ENUM("one", "two", "three", name="myenum")
:ticket:`2919`
For example, if the ``User`` class were assigned the schema "per_user"::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
- __table_args__ = {'schema': 'per_user'}
+ __table_args__ = {"schema": "per_user"}
On each request, the :class:`.Session` can be set up to refer to a
different schema each time::
session = Session()
- session.connection(execution_options={
- "schema_translate_map": {"per_user": "account_one"}})
+ session.connection(
+ execution_options={"schema_translate_map": {"per_user": "account_one"}}
+ )
# will query from the ``account_one.user`` table
session.query(User).get(5)
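The map can instead be established for all connections up front on the
``Engine``; a sketch, using an illustrative URL::

    engine = create_engine("postgresql://scott:tiger@host/dbname").execution_options(
        schema_translate_map={"per_user": "account_one"}
    )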
table to an integer "id" column on the other::
class Person(Base):
- __tablename__ = 'person'
+ __tablename__ = "person"
id = Column(StringAsInt, primary_key=True)
pets = relationship(
- 'Pets',
+ "Pets",
primaryjoin=(
- 'foreign(Pets.person_id)'
- '==cast(type_coerce(Person.id, Integer), Integer)'
- )
+ "foreign(Pets.person_id)" "==cast(type_coerce(Person.id, Integer), Integer)"
+ ),
)
+
class Pets(Base):
- __tablename__ = 'pets'
- id = Column('id', Integer, primary_key=True)
- person_id = Column('person_id', Integer)
+ __tablename__ = "pets"
+ id = Column("id", Integer, primary_key=True)
+ person_id = Column("person_id", Integer)
Above, in the :paramref:`_orm.relationship.primaryjoin` expression, we are
using :func:`.type_coerce` to handle bound parameters passed via
class MyObject(Base):
# ...
- json_value = Column(
- JSON(none_as_null=False), nullable=False, default=JSON.NULL)
+ json_value = Column(JSON(none_as_null=False), nullable=False, default=JSON.NULL)
Or, ensure the value is present on the object::
# default=None is the same as omitting it entirely, does not apply JSON NULL
json_value = Column(JSON(none_as_null=False), nullable=False, default=None)
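The constant may also be assigned directly when a JSON ``'null'`` is desired
for one particular object; a sketch::

    from sqlalchemy import JSON

    # persists JSON 'null' rather than SQL NULL / omitting the column
    obj = MyObject(json_value=JSON.NULL)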
-
.. seealso::
:ref:`change_3514`
A query such as the following will now augment only those columns
that are missing from the SELECT list, without duplicates::
- q = session.query(User.id, User.name.label('name')).\
- distinct().\
- order_by(User.id, User.name, User.fullname)
+ q = (
+ session.query(User.id, User.name.label("name"))
+ .distinct()
+ .order_by(User.id, User.name, User.fullname)
+ )
Produces::
last defined validator::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String)
@validates("data")
def _validate_data_two(self):
    assert "y" in data
+
configure_mappers()
Will raise::
>>> from sqlalchemy.schema import MetaData, Table, Column, CreateTable
>>> from sqlalchemy.types import String
- >>> t = Table('t', MetaData(), Column('x', String(), server_default="hi ' there"))
+ >>> t = Table("t", MetaData(), Column("x", String(), server_default="hi ' there"))
>>> print(CreateTable(t))
CREATE TABLE t (
one less dimension. Given a column with type ``ARRAY(Integer, dimensions=3)``,
we can now perform this expression::
- int_expr = col[5][6][7] # returns an Integer expression object
+ int_expr = col[5][6][7] # returns an Integer expression object
Previously, the indexed access to ``col[5]`` would return an expression of
type :class:`.Integer` where we could no longer perform indexed access
the :class:`_postgresql.ARRAY` type, this means that it is now straightforward
to produce JSON expressions with multiple levels of indexed access::
- json_expr = json_col['key1']['attr1'][5]
+ json_expr = json_col["key1"]["attr1"][5]
* The "textual" type that is returned by indexed access of :class:`.HSTORE`
as well as the "textual" type that is returned by indexed access of
This means that in most cases, an application that was doing this::
- expr = json_col['somekey'].cast(Integer)
+ expr = json_col["somekey"].cast(Integer)
Will now need to change to this::
- expr = json_col['somekey'].astext.cast(Integer)
-
+ expr = json_col["somekey"].astext.cast(Integer)
.. _change_2729:
as expected::
enum = Enum(
- 'manager', 'place_admin', 'carwash_admin',
- 'parking_admin', 'service_admin', 'tire_admin',
- 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles")
+ "manager",
+ "place_admin",
+ "carwash_admin",
+ "parking_admin",
+ "service_admin",
+ "tire_admin",
+ "mechanic",
+ "carwasher",
+ "tire_mechanic",
+ name="work_place_roles",
+ )
+
class WorkPlacement(Base):
- __tablename__ = 'work_placement'
+ __tablename__ = "work_placement"
id = Column(Integer, primary_key=True)
roles = Column(ARRAY(enum))
allows specification of which sub-types of views should be returned::
from sqlalchemy import inspect
+
insp = inspect(engine)
- plain_views = insp.get_view_names(include='plain')
- all_views = insp.get_view_names(include=('plain', 'materialized'))
+ plain_views = insp.get_view_names(include="plain")
+ all_views = insp.get_view_names(include=("plain", "materialized"))
:ticket:`3588`
parameters::
connection = engine.connect()
- connection = connection.execution_options(
- isolation_level="AUTOCOMMIT"
- )
+ connection = connection.execution_options(isolation_level="AUTOCOMMIT")
The isolation level makes use of the various "autocommit" attributes
provided by most MySQL DBAPIs.
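The level may also be established for all connections at engine creation
time; a minimal sketch::

    engine = create_engine(
        "mysql://scott:tiger@localhost/test", isolation_level="AUTOCOMMIT"
    )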
not the first column, e.g.::
t = Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True, autoincrement=False),
- Column('y', Integer, primary_key=True, autoincrement=True),
- mysql_engine='InnoDB'
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True, autoincrement=False),
+ Column("y", Integer, primary_key=True, autoincrement=True),
+ mysql_engine="InnoDB",
)
DDL such as the following would be generated::
(along with a KEY for the autoincrement column as required by MySQL), e.g.::
t = Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, autoincrement=True),
- PrimaryKeyConstraint('x', 'y'),
- UniqueConstraint('y'),
- mysql_engine='InnoDB'
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, autoincrement=True),
+ PrimaryKeyConstraint("x", "y"),
+ UniqueConstraint("y"),
+ mysql_engine="InnoDB",
)
Along with the change :ref:`change_3216`, composite primary keys with
directives are no longer needed::
t = Table(
- 'some_table', metadata,
- Column('x', Integer, primary_key=True),
- Column('y', Integer, primary_key=True, autoincrement=True),
- mysql_engine='InnoDB'
+ "some_table",
+ metadata,
+ Column("x", Integer, primary_key=True),
+ Column("y", Integer, primary_key=True, autoincrement=True),
+ mysql_engine="InnoDB",
)
-
-
Dialect Improvements and Changes - SQLite
=========================================
``SNAPSHOT``::
engine = create_engine(
- "mssql+pyodbc://scott:tiger@ms_2008",
- isolation_level="REPEATABLE READ"
+ "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
)
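As on other backends, the level can likewise be set per-connection; a sketch::

    connection = engine.connect().execution_options(isolation_level="SNAPSHOT")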
.. seealso::
copy the "length" parameter as the value ``"max"``::
>>> from sqlalchemy import create_engine, inspect
- >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True)
+ >>> engine = create_engine("mssql+pyodbc://scott:tiger@ms_2008", echo=True)
>>> engine.execute("create table s (x varchar(max), y varbinary(max))")
>>> insp = inspect(engine)
>>> for col in insp.get_columns("s"):
- ... print(col['type'].__class__, col['type'].length)
- ...
+ ... print(col["type"].__class__, col["type"].length)
<class 'sqlalchemy.sql.sqltypes.VARCHAR'> max
<class 'sqlalchemy.dialects.mssql.base.VARBINARY'> max
out as None, so that the type objects work in non-SQL Server contexts::
>>> for col in insp.get_columns("s"):
- ... print(col['type'].__class__, col['type'].length)
- ...
+ ... print(col["type"].__class__, col["type"].length)
<class 'sqlalchemy.sql.sqltypes.VARCHAR'> None
<class 'sqlalchemy.dialects.mssql.base.VARBINARY'> None
given a table such as::
account_table = Table(
- 'account', metadata,
- Column('id', Integer, primary_key=True),
- Column('info', String(100)),
- schema="customer_schema"
+ "account",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("info", String(100)),
+ schema="customer_schema",
)
The legacy mode of behavior will attempt to turn a schema-qualified table
Given a query as below::
- q = session.query(User).\
- filter(User.name.like('%ed%')).\
- options(subqueryload(User.addresses))
+ q = (
+ session.query(User)
+ .filter(User.name.like("%ed%"))
+ .options(subqueryload(User.addresses))
+ )
The SQL produced would be the query against ``User`` followed by the
subqueryload for ``User.addresses`` (note the parameters are also listed)::
With "selectin" loading, we instead get a SELECT that refers to the
actual primary key values loaded in the parent query::
- q = session.query(User).\
- filter(User.name.like('%ed%')).\
- options(selectinload(User.addresses))
+ q = (
+ session.query(User)
+ .filter(User.name.like("%ed%"))
+ .options(selectinload(User.addresses))
+ )
Produces::
from sqlalchemy.orm import query_expression
from sqlalchemy.orm import with_expression
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
# will be None normally...
expr = query_expression()
+
# but let's give it x + y
- a1 = session.query(A).options(
- with_expression(A.expr, A.x + A.y)).first()
+ a1 = session.query(A).options(with_expression(A.expr, A.x + A.y)).first()
print(a1.expr)
.. seealso::
a FROM clause (or equivalent, depending on backend)
against ``SomeOtherEntity``::
- query(SomeEntity).\
- filter(SomeEntity.id==SomeOtherEntity.id).\
- filter(SomeOtherEntity.foo=='bar').\
- delete()
+ query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter(
+ SomeOtherEntity.foo == "bar"
+ ).delete()
.. seealso::
@hybrid.hybrid_property
def name(self):
- return self.first_name + ' ' + self.last_name
+ return self.first_name + " " + self.last_name
@name.expression
def name(cls):
- return func.concat(cls.first_name, ' ', cls.last_name)
+ return func.concat(cls.first_name, " ", cls.last_name)
@name.update_expression
def name(cls, value):
- f, l = value.split(' ', 1)
+ f, l = value.split(" ", 1)
return [(cls.first_name, f), (cls.last_name, l)]
Above, an UPDATE can be rendered using::
- session.query(Person).filter(Person.id == 5).update(
- {Person.name: "Dr. No"})
+ session.query(Person).filter(Person.id == 5).update({Person.name: "Dr. No"})
Similar functionality is available for composites, where composite values
will be broken out into their individual columns for bulk UPDATE::
session.query(Vertex).update({Vertex.start: Point(3, 4)})
-
.. seealso::
:ref:`hybrid_bulk_update`
def name(self, value):
self.first_name = value
+
class FirstNameLastName(FirstNameOnly):
# ...
@FirstNameOnly.name.getter
def name(self):
- return self.first_name + ' ' + self.last_name
+ return self.first_name + " " + self.last_name
@name.setter
def name(self, value):
- self.first_name, self.last_name = value.split(' ', maxsplit=1)
+ self.first_name, self.last_name = value.split(" ", maxsplit=1)
@name.expression
def name(cls):
- return func.concat(cls.first_name, ' ', cls.last_name)
+ return func.concat(cls.first_name, " ", cls.last_name)
Above, the ``FirstNameOnly.name`` hybrid is referenced by the
``FirstNameLastName`` subclass in order to repurpose it specifically to the
from sqlalchemy.orm.attributes import OP_BULK_REPLACE
+
@event.listens_for(SomeObject.collection, "bulk_replace")
def process_collection(target, values, initiator):
values[:] = [_make_value(value) for value in values]
+
@event.listens_for(SomeObject.collection, "append", retval=True)
def process_collection(target, value, initiator):
# make sure bulk_replace didn't already do it
if initiator is None or initiator.op is not OP_BULK_REPLACE:
    return _make_value(value)
else:
    return value
-
:ticket:`3896`
.. _change_3303:
Base = declarative_base()
+
class MyDataClass(Base):
- __tablename__ = 'my_data'
+ __tablename__ = "my_data"
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
+
@event.listens_for(MyDataClass.data, "modified")
def modified_json(instance):
print("json value modified:", instance.data)
model = session.query(MyModel).first()
model.json_set &= {1, 3}
-
:ticket:`3853`
.. _change_3769:
itself an association proxy onto ``B``::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
b_values = association_proxy("atob", "b_value")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
value = Column(String)
c = relationship("C")
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
- b_id = Column(ForeignKey('b.id'))
+ b_id = Column(ForeignKey("b.id"))
value = Column(String)
class AtoB(Base):
- __tablename__ = 'atob'
+ __tablename__ = "atob"
- a_id = Column(ForeignKey('a.id'), primary_key=True)
- b_id = Column(ForeignKey('b.id'), primary_key=True)
+ a_id = Column(ForeignKey("a.id"), primary_key=True)
+ b_id = Column(ForeignKey("b.id"), primary_key=True)
a = relationship("A", backref="atob")
b = relationship("B", backref="atob")
.. sourcecode:: pycon+sql
- >>> s.query(A).filter(A.b_values.contains('hi')).all()
+ >>> s.query(A).filter(A.b_values.contains("hi")).all()
{opensql}SELECT a.id AS a_id
FROM a
WHERE EXISTS (SELECT 1
.. sourcecode:: pycon+sql
- >>> s.query(A).filter(A.c_values.any(value='x')).all()
+ >>> s.query(A).filter(A.c_values.any(value="x")).all()
{opensql}SELECT a.id AS a_id
FROM a
WHERE EXISTS (SELECT 1
field tracks this difference so that the two objects can co-exist in the
same identity map::
- tokyo = WeatherLocation('Asia', 'Tokyo')
- newyork = WeatherLocation('North America', 'New York')
+ tokyo = WeatherLocation("Asia", "Tokyo")
+ newyork = WeatherLocation("North America", "New York")
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]
- assert inspect(newyork_report).identity_key == (Report, (1, ), "north_america")
- assert inspect(tokyo_report).identity_key == (Report, (1, ), "asia")
+ assert inspect(newyork_report).identity_key == (Report, (1,), "north_america")
+ assert inspect(tokyo_report).identity_key == (Report, (1,), "asia")
# the token representing the originating shard is also available directly
assert inspect(newyork_report).identity_token == "north_america"
assert inspect(tokyo_report).identity_token == "asia"
-
:ticket:`4137`
New Features and Improvements - Core
from sqlalchemy import Boolean
from sqlalchemy import TypeDecorator
+
class LiberalBoolean(TypeDecorator):
    impl = Boolean

    def process_bind_param(self, value, dialect):
        if value is not None:
            value = bool(int(value))
        return value
-
:ticket:`4102`
.. _change_3919:
have the effect of more parenthesization being generated when comparison
operators are combined together, such as::
- (column('q') == null()) != (column('y') == null())
+ (column("q") == null()) != (column("y") == null())
Will now generate ``(q IS NULL) != (y IS NULL)`` rather than
``q IS NULL != y IS NULL``.
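The new grouping can be observed by printing such an expression; a quick
sketch::

    from sqlalchemy import column, null

    expr = (column("q") == null()) != (column("y") == null())
    print(expr)  # (q IS NULL) != (y IS NULL)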
:paramref:`_schema.Column.comment` arguments::
Table(
- 'my_table', metadata,
- Column('q', Integer, comment="the Q value"),
- comment="my Q table"
+ "my_table",
+ metadata,
+ Column("q", Integer, comment="the Q value"),
+ comment="my Q table",
)
Above, DDL will be rendered appropriately upon table create to associate
Given a statement as::
- stmt = users.delete().\
- where(users.c.id == addresses.c.id).\
- where(addresses.c.email_address.startswith('ed%'))
+ stmt = (
+ users.delete()
+ .where(users.c.id == addresses.c.id)
+ .where(addresses.c.email_address.startswith("ed%"))
+ )
conn.execute(stmt)
The resulting SQL from the above statement on a PostgreSQL backend
An expression such as::
- >>> column('x').startswith('total%score', autoescape=True)
+ >>> column("x").startswith("total%score", autoescape=True)
Renders as::
Similarly, an expression that has backslashes::
- >>> column('x').startswith('total/score', autoescape=True)
+ >>> column("x").startswith("total/score", autoescape=True)
Will render the same way, with the value of the parameter "x_1" as
``'total//score'``.
float_value = connection.scalar(
- select([literal(4.56)]) # the "BindParameter" will now be
- # Float, not Numeric(asdecimal=True)
+ select([literal(4.56)]) # the "BindParameter" will now be
+ # Float, not Numeric(asdecimal=True)
)
* Math operations between :class:`.Numeric`, :class:`.Float`, and
as well as if the type should be :class:`.Float`::
# asdecimal flag is maintained
- expr = column('a', Integer) * column('b', Numeric(asdecimal=False))
+ expr = column("a", Integer) * column("b", Numeric(asdecimal=False))
assert expr.type.asdecimal == False
# Float subclass of Numeric is maintained
- expr = column('a', Integer) * column('b', Float())
+ expr = column("a", Integer) * column("b", Float())
assert isinstance(expr.type, Float)
* The :class:`.Float` datatype will apply the ``float()`` processor to
are named in the documentation now::
>>> from sqlalchemy import select, table, column, func, tuple_
- >>> t = table('t',
- ... column('value'), column('x'),
- ... column('y'), column('z'), column('q'))
+ >>> t = table("t", column("value"), column("x"), column("y"), column("z"), column("q"))
>>> stmt = select([func.sum(t.c.value)]).group_by(
... func.grouping_sets(
... tuple_(t.c.x, t.c.y),
def mydefault(context):
- return context.get_current_parameters()['counter'] + 12
+ return context.get_current_parameters()["counter"] + 12
- mytable = Table('mytable', metadata_obj,
- Column('counter', Integer),
- Column('counter_plus_twelve',
- Integer, default=mydefault, onupdate=mydefault)
+
+ mytable = Table(
+ "mytable",
+ metadata_obj,
+ Column("counter", Integer),
+ Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault),
)
- stmt = mytable.insert().values(
- [{"counter": 5}, {"counter": 18}, {"counter": 20}])
+ stmt = mytable.insert().values([{"counter": 5}, {"counter": 18}, {"counter": 20}])
conn.execute(stmt)
sess = Session()
- user = sess.query(User).filter_by(name='x').first()
+ user = sess.query(User).filter_by(name="x").first()
+
@event.listens_for(sess, "after_rollback")
def after_rollback(session):
# to emit a lazy load.
print("user name: %s" % user.name)
+
@event.listens_for(sess, "after_commit")
def after_commit(session):
# 'user.name' is present, assuming it was already
# loaded. this is the existing behavior.
print("user name: %s" % user.name)
+
if should_rollback:
sess.rollback()
else:
the previous collection, a side effect of this was that the collection
being replaced would also be mutated, which is misleading and unnecessary::
- >>> a1, a2, a3 = Address('a1'), Address('a2'), Address('a3')
+ >>> a1, a2, a3 = Address("a1"), Address("a2"), Address("a3")
>>> user.addresses = [a1, a2]
>>> previous_collection = user.addresses
Given a mapping as::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
- @validates('bs')
+ @validates("bs")
def convert_dict_to_b(self, key, value):
- return B(data=value['data'])
+ return B(data=value["data"])
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
+ a_id = Column(ForeignKey("a.id"))
data = Column(String)
Above, we could use the validator as follows, to convert from an incoming
class A(Base):
# ...
- @validates('bs')
+ @validates("bs")
def validate_b(self, key, value):
assert value.data is not None
return value
An exception is now raised if the :func:`.attributes.flag_modified` function
is used to mark an attribute as modified that isn't actually loaded::
- a1 = A(data='adf')
+ a1 = A(data="adf")
s.add(a1)
s.flush()
# expire, similarly as though we said s.commit()
- s.expire(a1, 'data')
+ s.expire(a1, "data")
# will raise InvalidRequestError
- attributes.flag_modified(a1, 'data')
+ attributes.flag_modified(a1, "data")
This is because the flush process will most likely fail in any case if the
attribute remains un-present by the time flush occurs. To mark an object
A very old and undocumented keyword argument ``scope`` has been removed::
from sqlalchemy.orm import scoped_session
+
Session = scoped_session(sessionmaker())
session = Session(scope=None)
overwrite it::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
- favorite_b_id = Column(ForeignKey('b.id', name="favorite_b_fk"))
+ favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk"))
bs = relationship("B", primaryjoin="A.id == B.a_id")
favorite_b = relationship(
- "B", primaryjoin="A.favorite_b_id == B.id", post_update=True)
+ "B", primaryjoin="A.favorite_b_id == B.id", post_update=True
+ )
updated = Column(Integer, onupdate=my_onupdate_function)
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id', name="a_fk"))
+ a_id = Column(ForeignKey("a.id", name="a_fk"))
+
a1 = A()
b1 = B()
Given a mapping::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = Column(Integer, primary_key=True)
version_id = Column(Integer, default=0)
- parent_id = Column(ForeignKey('node.id'))
- favorite_node_id = Column(ForeignKey('node.id'))
+ parent_id = Column(ForeignKey("node.id"))
+ favorite_node_id = Column(ForeignKey("node.id"))
nodes = relationship("Node", primaryjoin=remote(parent_id) == id)
favorite_node = relationship(
- "Node", primaryjoin=favorite_node_id == remote(id),
- post_update=True
+ "Node", primaryjoin=favorite_node_id == remote(id), post_update=True
)
- __mapper_args__ = {
- 'version_id_col': version_id
- }
+ __mapper_args__ = {"version_id_col": version_id}
An UPDATE of a node that associates another node as "favorite" will
now increment the version counter as well as match the current version::
a result with no return type (assume ``-%>`` is some special operator
supported by the database)::
- >>> column('x', types.DateTime).op('-%>')(None).type
+ >>> column("x", types.DateTime).op("-%>")(None).type
NullType()
Other types would use the default behavior of using the left-hand type
as the return type::
- >>> column('x', types.String(50)).op('-%>')(None).type
+ >>> column("x", types.String(50)).op("-%>")(None).type
String(length=50)
These behaviors were mostly by accident, so the behavior has been made
consistent with the second form, that is, the default return type is the
same as the left-hand expression::
- >>> column('x', types.DateTime).op('-%>')(None).type
+ >>> column("x", types.DateTime).op("-%>")(None).type
DateTime()
As most user-defined operators tend to be "comparison" operators, often
its documented behavior of allowing the return type to be :class:`.Boolean`
in all cases, including for :class:`_types.ARRAY` and :class:`_types.JSON`::
- >>> column('x', types.String(50)).op('-%>', is_comparison=True)(None).type
+ >>> column("x", types.String(50)).op("-%>", is_comparison=True)(None).type
Boolean()
- >>> column('x', types.ARRAY(types.Integer)).op('-%>', is_comparison=True)(None).type
+ >>> column("x", types.ARRAY(types.Integer)).op("-%>", is_comparison=True)(None).type
Boolean()
- >>> column('x', types.JSON()).op('-%>', is_comparison=True)(None).type
+ >>> column("x", types.JSON()).op("-%>", is_comparison=True)(None).type
Boolean()
To assist with boolean comparison operators, a new shorthand method
:meth:`.Operators.bool_op` has been added. This method should be preferred
for on-the-fly boolean operators::
- >>> print(column('x', types.Integer).bool_op('-%>')(5))
+ >>> print(column("x", types.Integer).bool_op("-%>")(5))
x -%> :x_1
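Since the returned type is :class:`.Boolean`, the expression is also usable
directly as WHERE criteria; a minimal sketch reusing the hypothetical
``-%>`` operator::

    from sqlalchemy import column, select, table

    t = table("t", column("x"))

    # bool_op() gives the expression a Boolean return type, suitable
    # for use as filter criteria
    stmt = select([t]).where(t.c.x.bool_op("-%>")(5))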
construct that stated a single percent sign::
>>> from sqlalchemy import literal_column
- >>> print(literal_column('some%symbol'))
+ >>> print(literal_column("some%symbol"))
some%%symbol
The percent sign is now unaffected for dialects that are not set to
as is appropriate::
>>> from sqlalchemy import literal_column
- >>> print(literal_column('some%symbol'))
+ >>> print(literal_column("some%symbol"))
some%symbol
>>> from sqlalchemy.dialects import mysql
- >>> print(literal_column('some%symbol').compile(dialect=mysql.dialect()))
+ >>> print(literal_column("some%symbol").compile(dialect=mysql.dialect()))
some%%symbol
As part of this change, the doubling that has been present when using
functions, used to supply ad-hoc column collations at the statement level,
is fixed, where a case sensitive name would not be quoted::
- stmt = select([mytable.c.x, mytable.c.y]).\
- order_by(mytable.c.somecolumn.collate("fr_FR"))
+ stmt = select([mytable.c.x, mytable.c.y]).order_by(
+ mytable.c.somecolumn.collate("fr_FR")
+ )
now renders::
``use_batch_mode`` argument on :func:`_sa.create_engine`::
engine = create_engine(
- "postgresql+psycopg2://scott:tiger@host/dbname",
- use_batch_mode=True)
+ "postgresql+psycopg2://scott:tiger@host/dbname", use_batch_mode=True
+ )
The feature is considered to be experimental for the moment but may become
on by default in a future release.
from sqlalchemy.dialects.postgresql import INTERVAL
- Table(
- 'my_table', metadata,
- Column("some_interval", INTERVAL(fields="DAY TO SECOND"))
- )
+ Table("my_table", metadata, Column("some_interval", INTERVAL(fields="DAY TO SECOND")))
Additionally, all INTERVAL datatypes can now be reflected independently
of the "fields" specifier present; the "fields" parameter in the datatype
from sqlalchemy.dialects.mysql import insert
- insert_stmt = insert(my_table). \
- values(id='some_id', data='some data to insert')
+ insert_stmt = insert(my_table).values(id="some_id", data="some data to insert")
on_conflict_stmt = insert_stmt.on_duplicate_key_update(
- data=insert_stmt.inserted.data,
- status='U'
+ data=insert_stmt.inserted.data, status="U"
)
conn.execute(on_conflict_stmt)
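For reference, the statement above renders on MySQL roughly as follows (a
sketch; the exact parameter markers depend on the driver)::

    INSERT INTO my_table (id, data) VALUES (%s, %s)
    ON DUPLICATE KEY UPDATE data = VALUES(data), status = %s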
Previously, the foreign keys result would look like::
- [{'referred_table': u'users', 'referred_columns': [u'id'],
- 'referred_schema': None, 'name': 'USER_ID_FK',
- 'constrained_columns': [u'user_id']}]
+ [
+ {
+ "referred_table": "users",
+ "referred_columns": ["id"],
+ "referred_schema": None,
+ "name": "USER_ID_FK",
+ "constrained_columns": ["user_id"],
+ }
+ ]
Where the above could create problems, particularly with Alembic autogenerate.
occurs, allowing database and/or owner names that themselves contain one
or more dots::
- Table(
- "some_table", metadata,
- Column("q", String(50)),
- schema="[MyDataBase.dbo]"
- )
+ Table("some_table", metadata, Column("q", String(50)), schema="[MyDataBase.dbo]")
The above table will consider the "owner" to be ``MyDataBase.dbo``, which
will also be quoted upon render, and the "database" as None. To individually
refer to database name and owner, use two pairs of brackets::
Table(
- "some_table", metadata,
+ "some_table",
+ metadata,
Column("q", String(50)),
- schema="[MyDataBase.SomeDB].[MyDB.owner]"
+ schema="[MyDataBase.SomeDB].[MyDB.owner]",
)
Additionally, the :class:`.quoted_name` construct is now honored when
j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id)
B_viacd = mapper(
- B, j, non_primary=True, primary_key=[j.c.b_id],
+ B,
+ j,
+ non_primary=True,
+ primary_key=[j.c.b_id],
properties={
"id": j.c.b_id, # so that 'id' looks the same as before
- "c_id": j.c.c_id, # needed for disambiguation
+ "c_id": j.c.c_id, # needed for disambiguation
"d_c_id": j.c.d_c_id, # needed for disambiguation
"b_id": [j.c.b_id, j.c.d_b_id],
"d_id": j.c.d_id,
- }
+ },
)
A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id)
Given a mapping::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", lazy="selectin")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
some_object = session.query(SomeObject).get(5)
- del some_object.some_attribute # from a SQL perspective, works like "= None"
+ del some_object.some_attribute # from a SQL perspective, works like "= None"
:ticket:`4354`
from sqlalchemy import inspect
- u1 = User(id=7, name='ed')
-
- inspect(u1).info['user_info'] = '7|ed'
+ u1 = User(id=7, name="ed")
+ inspect(u1).info["user_info"] = "7|ed"
:ticket:`4257`
Given a mapping as::
class A(Base):
- __tablename__ = 'test_a'
+ __tablename__ = "test_a"
id = Column(Integer, primary_key=True)
- ab = relationship(
- 'AB', backref='a', uselist=False)
+ ab = relationship("AB", backref="a", uselist=False)
b = association_proxy(
- 'ab', 'b', creator=lambda b: AB(b=b),
- cascade_scalar_deletes=True)
+ "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True
+ )
class B(Base):
- __tablename__ = 'test_b'
+ __tablename__ = "test_b"
id = Column(Integer, primary_key=True)
- ab = relationship('AB', backref='b', cascade='all, delete-orphan')
+ ab = relationship("AB", backref="b", cascade="all, delete-orphan")
class AB(Base):
- __tablename__ = 'test_ab'
+ __tablename__ = "test_ab"
a_id = Column(Integer, ForeignKey(A.id), primary_key=True)
b_id = Column(Integer, ForeignKey(B.id), primary_key=True)
class User(Base):
# ...
- keywords = association_proxy('kws', 'keyword')
+ keywords = association_proxy("kws", "keyword")
proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User)
# column-based association proxy
values = association_proxy("elements", "value")
+
class Element(Base):
# ...
The ``User.values`` association proxy refers to the ``Element.value`` column.
Standard column operations are now available, such as ``like``::
- >>> print(s.query(User).filter(User.values.like('%foo%')))
+ >>> print(s.query(User).filter(User.values.like("%foo%")))
SELECT "user".id AS user_id
FROM "user"
WHERE EXISTS (SELECT 1
``equals``::
- >>> print(s.query(User).filter(User.values == 'foo'))
+ >>> print(s.query(User).filter(User.values == "foo"))
SELECT "user".id AS user_id
FROM "user"
WHERE EXISTS (SELECT 1
the association proxy used ``.contains`` as a list containment operator only.
With a column-oriented comparison, it now behaves like a "like"::
- >>> print(s.query(User).filter(User.values.contains('foo')))
+ >>> print(s.query(User).filter(User.values.contains("foo")))
SELECT "user".id AS user_id
FROM "user"
WHERE EXISTS (SELECT 1
as before, that of testing for collection membership, e.g. given a mapping::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = Column(Integer, primary_key=True)
user_elements = relationship("UserElement")
class UserElement(Base):
- __tablename__ = 'user_element'
+ __tablename__ = "user_element"
id = Column(Integer, primary_key=True)
user_id = Column(ForeignKey("user.id"))
class Element(Base):
- __tablename__ = 'element'
+ __tablename__ = "element"
id = Column(Integer, primary_key=True)
value = Column(String)
As an example, given a mapping with association proxy::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
- b_data = association_proxy('bs', 'data')
+ b_data = association_proxy("bs", "data")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
data = Column(String)
- a1 = A(bs=[B(data='b1'), B(data='b2')])
+ a1 = A(bs=[B(data="b1"), B(data="b2")])
b_data = a1.b_data
The change is that the ``b_data`` collection is now maintaining a strong
reference to the ``a1`` object, so that it remains present::
- assert b_data == ['b1', 'b2']
+ assert b_data == ["b1", "b2"]
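A minimal sketch of the new behavior, reusing the mapping above::

    import gc

    a1 = A(bs=[B(data="b1"), B(data="b2")])
    b_data = a1.b_data

    del a1  # drop the only direct reference to the parent
    gc.collect()

    # the proxy collection keeps the parent alive, so it remains usable
    assert b_data == ["b1", "b2"]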
This change introduces the side effect that if an application is passing around
the collection as above, **the parent object won't be garbage collected** until
id = Column(Integer, primary_key=True)
b_rel = relationship(
- "B", collection_class=set, cascade="all, delete-orphan",
+ "B",
+ collection_class=set,
+ cascade="all, delete-orphan",
)
b = association_proxy("b_rel", "value", creator=lambda x: B(value=x))
a_id = Column(Integer, ForeignKey("test_a.id"), nullable=False)
value = Column(String)
+
# ...
s = Session(e)
# against the deleted ones.
assert len(s.new) == 1
-
:ticket:`2642`
.. _change_1103:
"swap" operation. Given a standard one-to-many/many-to-one setup::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", backref="a")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
>>> del a1.bs[1]
>>> a1.bs # collection is unaffected so far...
[<__main__.B object at 0x7f047af5fb70>]
- >>> b1.a # however b1.a is None
+ >>> b1.a # however b1.a is None
>>>
>>> session.add(a1)
>>> session.commit() # so upon flush + expire....
one-to-one relationships, in the following situation::
class User(Base):
- __tablename__ = 'users'
+ __tablename__ = "users"
id = Column(Integer, primary_key=True)
- addresses = relationship(
- "Address",
- passive_deletes="all")
+ addresses = relationship("Address", passive_deletes="all")
+
class Address(Base):
- __tablename__ = 'addresses'
+ __tablename__ = "addresses"
id = Column(Integer, primary_key=True)
email = Column(String)
- user_id = Column(Integer, ForeignKey('users.id'))
+ user_id = Column(Integer, ForeignKey("users.id"))
user = relationship("User")
+
u1 = session.query(User).first()
address = u1.addresses[0]
u1.addresses.remove(address)
separator. Below we define a convention that will name :class:`.UniqueConstraint`
constraints with a name that joins together the names of all columns::
- metadata_obj = MetaData(naming_convention={
- "uq": "uq_%(table_name)s_%(column_0_N_name)s"
- })
+ metadata_obj = MetaData(
+ naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"}
+ )
table = Table(
- 'info', metadata_obj,
- Column('a', Integer),
- Column('b', Integer),
- Column('c', Integer),
- UniqueConstraint('a', 'b', 'c')
+ "info",
+ metadata_obj,
+ Column("a", Integer),
+ Column("b", Integer),
+ Column("c", Integer),
+ UniqueConstraint("a", "b", "c"),
)
The CREATE TABLE for the above table will render as::
constraint name would normally be generated from the table definition below::
long_names = Table(
- 'long_names', metadata_obj,
- Column('information_channel_code', Integer, key='a'),
- Column('billing_convention_name', Integer, key='b'),
- Column('product_identifier', Integer, key='c'),
- UniqueConstraint('a', 'b', 'c')
+ "long_names",
+ metadata_obj,
+ Column("information_channel_code", Integer, key="a"),
+ Column("billing_convention_name", Integer, key="b"),
+ Column("product_identifier", Integer, key="c"),
+ UniqueConstraint("a", "b", "c"),
)
The truncation logic will ensure a too-long name isn't generated for the
side::
class Venue(Base):
- __tablename__ = 'venue'
+ __tablename__ = "venue"
id = Column(Integer, primary_key=True)
name = Column(String)
descendants = relationship(
"Venue",
- primaryjoin=func.instr(
- remote(foreign(name)), name + "/"
- ).as_comparison(1, 2) == 1,
+ primaryjoin=func.instr(remote(foreign(name)), name + "/").as_comparison(1, 2)
+ == 1,
viewonly=True,
- order_by=name
+ order_by=name,
)
Above, the :paramref:`_orm.relationship.primaryjoin` of the "descendants" relationship
and a joinedload, such as::
- v1 = s.query(Venue).filter_by(name="parent1").options(
- joinedload(Venue.descendants)).one()
+ v1 = (
+ s.query(Venue)
+ .filter_by(name="parent1")
+ .options(joinedload(Venue.descendants))
+ .one()
+ )
to work as::
>>> from sqlalchemy import select, literal_column, bindparam
>>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
>>> with e.connect() as conn:
- ... conn.execute(
- ... select([literal_column('1')]).
- ... where(literal_column('1').in_(bindparam('q', expanding=True))),
- ... q=[]
- ... )
- ...
+ ... conn.execute(
+ ... select([literal_column("1")]).where(
+ ... literal_column("1").in_(bindparam("q", expanding=True))
+ ... ),
+ ... q=[],
+ ... )
SELECT 1 WHERE 1 IN (SELECT CAST(NULL AS INTEGER) WHERE 1!=1)
The feature also works for tuple-oriented IN statements, where the "empty IN"
>>> from sqlalchemy import select, literal_column, tuple_, bindparam
>>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
>>> with e.connect() as conn:
- ... conn.execute(
- ... select([literal_column('1')]).
- ... where(tuple_(50, "somestring").in_(bindparam('q', expanding=True))),
- ... q=[]
- ... )
- ...
+ ... conn.execute(
+ ... select([literal_column("1")]).where(
+ ... tuple_(50, "somestring").in_(bindparam("q", expanding=True))
+ ... ),
+ ... q=[],
+ ... )
SELECT 1 WHERE (%(param_1)s, %(param_2)s)
IN (SELECT CAST(NULL AS INTEGER), CAST(NULL AS VARCHAR) WHERE 1!=1)
from sqlalchemy import TypeDecorator, LargeBinary, func
+
class CompressedLargeBinary(TypeDecorator):
impl = LargeBinary
def column_expression(self, col):
return func.uncompress(col, type_=self)
+
MyLargeBinary = LargeBinary().with_variant(CompressedLargeBinary(), "sqlite")
The above expression will render a function within SQL when used on SQLite only::
from sqlalchemy import select, column
from sqlalchemy.dialects import sqlite
- print(select([column('x', CompressedLargeBinary)]).compile(dialect=sqlite.dialect()))
+
+ print(select([column("x", CompressedLargeBinary)]).compile(dialect=sqlite.dialect()))
will render::
Given a schema such as::
dv = Table(
- 'data_values', metadata_obj,
- Column('modulus', Integer, nullable=False),
- Column('data', String(30)),
- postgresql_partition_by='range(modulus)')
+ "data_values",
+ metadata_obj,
+ Column("modulus", Integer, nullable=False),
+ Column("data", String(30)),
+ postgresql_partition_by="range(modulus)",
+ )
sa.event.listen(
dv,
"after_create",
sa.DDL(
"CREATE TABLE data_values_4_10 PARTITION OF data_values "
- "FOR VALUES FROM (4) TO (10)")
+ "FOR VALUES FROM (4) TO (10)"
+ ),
)
The two table names ``'data_values'`` and ``'data_values_4_10'`` will come
from sqlalchemy.dialects.mysql import insert
- insert_stmt = insert(my_table).values(
- id='some_existing_id',
- data='inserted value')
+ insert_stmt = insert(my_table).values(id="some_existing_id", data="inserted value")
on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
[
as several :class:`_schema.Column` -specific variants::
some_table = Table(
- 'some_table', metadata_obj,
- Column('id', Integer, primary_key=True, sqlite_on_conflict_primary_key='FAIL'),
- Column('data', Integer),
- UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
+ "some_table",
+ metadata_obj,
+ Column("id", Integer, primary_key=True, sqlite_on_conflict_primary_key="FAIL"),
+ Column("data", Integer),
+ UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"),
)
The above table would render in a CREATE TABLE statement as::
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server",
- fast_executemany=True)
+ fast_executemany=True,
+ )
.. seealso::
on :class:`_schema.Column`::
test = Table(
- 'test', metadata_obj,
+ "test",
+ metadata_obj,
Column(
- 'id', Integer, primary_key=True, mssql_identity_start=100,
- mssql_identity_increment=10
+ "id",
+ Integer,
+ primary_key=True,
+ mssql_identity_start=100,
+ mssql_identity_increment=10,
),
- Column('name', String(20))
+ Column("name", String(20)),
)
In order to emit ``IDENTITY`` on a non-primary key column, which is a little-used
test = Table(
- 'test', metadata_obj,
- Column('id', Integer, primary_key=True, autoincrement=False),
- Column('number', Integer, autoincrement=True)
+ "test",
+ metadata_obj,
+ Column("id", Integer, primary_key=True, autoincrement=False),
+ Column("number", Integer, autoincrement=True),
)
.. seealso::
with Session(engine, future=True) as sess:
- stmt = select(User).where(
- User.name == 'sandy'
- ).join(User.addresses).where(Address.email_address.like("%gmail%"))
+ stmt = (
+ select(User)
+ .where(User.name == "sandy")
+ .join(User.addresses)
+ .where(Address.email_address.like("%gmail%"))
+ )
result = sess.execute(stmt)
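Since :meth:`_orm.Session.execute` returns :class:`.Row` objects rather than
entities directly, the ``User`` instances are typically unpacked using the
scalars accessor; a short sketch::

    for user in result.scalars():
        print(user.name)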
Core :func:`_sql.update` and :func:`_sql.delete` can be used for bulk
operations. A bulk update like the following::
- session.query(User).filter(User.name == 'sandy').update({"password": "foobar"}, synchronize_session="fetch")
+ session.query(User).filter(User.name == "sandy").update(
+ {"password": "foobar"}, synchronize_session="fetch"
+ )
can now be achieved in :term:`2.0 style` (and indeed the above runs internally
in this way) as follows::
with Session(engine, future=True) as sess:
- stmt = update(User).where(
- User.name == 'sandy'
- ).values(password="foobar").execution_options(
- synchronize_session="fetch"
+ stmt = (
+ update(User)
+ .where(User.name == "sandy")
+ .values(password="foobar")
+ .execution_options(synchronize_session="fetch")
)
sess.execute(stmt)
is code such as the following::
stmt = select(users)
- stmt = stmt.where(stmt.c.name == 'foo')
+ stmt = stmt.where(stmt.c.name == "foo")
The above code appears intuitive, as though it would generate
"SELECT * FROM users WHERE name='foo'"; however, veteran SQLAlchemy users will
present in the ``users.c`` collection::
stmt = select(users)
- stmt = stmt.where(stmt.selected_columns.name == 'foo')
-
+ stmt = stmt.where(stmt.selected_columns.name == "foo")
:ticket:`4617`
:meth:`_orm.Query.join`, adding JOIN criteria to the existing statement by
matching to the left entity::
- stmt = select(user_table).join(addresses_table, user_table.c.id == addresses_table.c.user_id)
+ stmt = select(user_table).join(
+ addresses_table, user_table.c.id == addresses_table.c.user_id
+ )
producing::
To alter the contents of the :attr:`_engine.URL.query` dictionary, methods
such as :meth:`_engine.URL.update_query_dict` may be used::
- >>> url.update_query_dict({"sslcert": '/path/to/crt'})
+ >>> url.update_query_dict({"sslcert": "/path/to/crt"})
postgresql://user:***@host/dbname?sslcert=%2Fpath%2Fto%2Fcrt
To upgrade code that is mutating these fields directly, a **backwards and
some_url.drivername = some_drivername
return some_url
+
def set_ssl_cert(some_url, ssl_cert):
# check for 1.4
if hasattr(some_url, "update_query_dict"):
For example::
>>> from sqlalchemy.engine import make_url
- >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt")
+ >>> url = make_url(
+ ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt"
+ ... )
>>> url.query
immutabledict({'alt_host': ('host1', 'host2'), 'sslcert': '/path/to/crt'})
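Because the structure is immutable, attempts to mutate it in place now fail;
a quick sketch::

    >>> url.query["alt_host"] = "host3"
    Traceback (most recent call last):
      ...
    TypeError: ...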
from sqlalchemy.engine import CreateEnginePlugin
+
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
# check for 1.4 style
if hasattr(CreateEnginePlugin, "update_url"):
- self.my_argument_one = url.query['my_argument_one']
- self.my_argument_two = url.query['my_argument_two']
+ self.my_argument_one = url.query["my_argument_one"]
+ self.my_argument_two = url.query["my_argument_two"]
else:
# legacy
- self.my_argument_one = url.query.pop('my_argument_one')
- self.my_argument_two = url.query.pop('my_argument_two')
+ self.my_argument_one = url.query.pop("my_argument_one")
+ self.my_argument_two = url.query.pop("my_argument_two")
- self.my_argument_three = kwargs.pop('my_argument_three', None)
+ self.my_argument_three = kwargs.pop("my_argument_three", None)
def update_url(self, url):
# this method runs in 1.4 only and should be used to consume
# plugin-specific arguments
- return url.difference_update_query(
- ["my_argument_one", "my_argument_two"]
- )
+ return url.difference_update_query(["my_argument_one", "my_argument_two"])
See the docstring at :class:`_engine.CreateEnginePlugin` for complete details
on how this class is used.
stmt = select(users_table).where(
case(
- (users_table.c.name == 'wendy', 'W'),
- (users_table.c.name == 'jack', 'J'),
- else_='E'
+ (users_table.c.name == "wendy", "W"),
+ (users_table.c.name == "jack", "J"),
+ else_="E",
)
)
address_alias = aliased(Address)
- q = session.query(User).\
- join(address_alias, User.addresses).\
- filter(Address.email_address == 'foo')
+ q = (
+ session.query(User)
+ .join(address_alias, User.addresses)
+ .filter(Address.email_address == "foo")
+ )
The above query selects from a JOIN of ``User`` and ``address_alias``, the
latter of which is an alias of the ``Address`` entity. However, the
clause to link the new ``Address`` entity with the previous ``address_alias``
entity and that will remove the warning::
- q = session.query(User).\
- join(address_alias, User.addresses).\
- filter(Address.email_address == 'foo').\
- filter(Address.id == address_alias.id) # resolve cartesian products,
- # will no longer warn
+ q = (
+ session.query(User)
+ .join(address_alias, User.addresses)
+ .filter(Address.email_address == "foo")
+ .filter(Address.id == address_alias.id)
+ ) # resolve cartesian products,
+ # will no longer warn
The cartesian product warning considers **any** kind of link between two
FROM clauses to be a resolution, even if the end result set is still
FROM clause that is completely unexpected. If the FROM clause is referred
to explicitly elsewhere and linked to the other FROMs, no warning is emitted::
- q = session.query(User).\
- join(address_alias, User.addresses).\
- filter(Address.email_address == 'foo').\
- filter(Address.id > address_alias.id) # will generate a lot of rows,
- # but no warning
+ q = (
+ session.query(User)
+ .join(address_alias, User.addresses)
+ .filter(Address.email_address == "foo")
+ .filter(Address.id > address_alias.id)
+ ) # will generate a lot of rows,
+ # but no warning
Full cartesian products are also allowed if they are explicitly stated; if we
wanted for example the cartesian product of ``User`` and ``Address``, we can
with engine.connect() as conn:
row = conn.execute(table.select().where(table.c.id == 5)).one()
-
:meth:`_engine.Result.one_or_none` - same, but also returns None for no rows
:meth:`_engine.Result.all` - returns all rows
.. sourcecode::
with engine.connect() as conn:
- # requests x, y, z
- result = conn.execute(select(table.c.x, table.c.y, table.c.z))
+ # requests x, y, z
+ result = conn.execute(select(table.c.x, table.c.y, table.c.z))
- # iterate rows as y, x
- for y, x in result.columns("y", "x"):
- print("Y: %s X: %s" % (y, x))
+ # iterate rows as y, x
+ for y, x in result.columns("y", "x"):
+ print("Y: %s X: %s" % (y, x))
:meth:`_engine.Result.scalars` - returns lists of scalar objects, from the
first column by default but can also be selected:
.. sourcecode::
with engine.connect() as conn:
- result = conn.execute(select(table.c.x, table.c.y, table.c.z))
+ result = conn.execute(select(table.c.x, table.c.y, table.c.z))
- for map_ in result.mappings():
- print("Y: %(y)s X: %(x)s" % map_)
+ for map_ in result.mappings():
+ print("Y: %(y)s X: %(x)s" % map_)
When using Core, the object returned by :meth:`_engine.Connection.execute` is
an instance of :class:`.CursorResult`, which continues to feature the same API
The biggest cross-incompatible difference is the behavior of ``__contains__``::
- "id" in row # True for a mapping, False for a named tuple
- "some name" in row # False for a mapping, True for a named tuple
+ "id" in row # True for a mapping, False for a named tuple
+ "some name" in row # False for a mapping, True for a named tuple
In 1.4, when a ``LegacyRow`` is returned by a Core result set, the above
``"id" in row`` comparison will continue to succeed, however a deprecation
a datetime value from SQLite, the data for the row as present in the
:class:`.RowProxy` object would previously have looked like::
- row_proxy = (1, '2019-12-31 19:56:58.272106')
+ row_proxy = (1, "2019-12-31 19:56:58.272106")
and then upon access via ``__getitem__``, the ``datetime.strptime()`` function
would be used on the fly to convert the above string date into a ``datetime``
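A rough illustration of that per-access conversion (the format string here is
an assumption for this particular value)::

    from datetime import datetime

    # hypothetical equivalent of what the row processor did on each access
    value = datetime.strptime("2019-12-31 19:56:58.272106", "%Y-%m-%d %H:%M:%S.%f")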
the 2.0 transition::
>>> from sqlalchemy import column, select
- >>> c1, c2, c3, c4 = column('c1'), column('c2'), column('c3'), column('c4')
- >>> stmt = select(c1, c2, c3.label('c2'), c2, c4)
+ >>> c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4")
+ >>> stmt = select(c1, c2, c3.label("c2"), c2, c4)
>>> print(stmt)
SELECT c1, c2, c3 AS c2, c2, c4
deduplication of implicitly generated labels::
>>> from sqlalchemy import table
- >>> user = table('user', column('id'), column('name'))
+ >>> user = table("user", column("id"), column("name"))
>>> stmt = select(user.c.id, user.c.name, user.c.id).apply_labels()
>>> print(stmt)
SELECT "user".id AS user_id, "user".name AS user_name, "user".id AS id_1
For CAST against expressions that don't have a name, the previous logic is used
to generate the usual "anonymous" labels::
- >>> print(select(cast('hi there,' + foo.c.data, String)))
+ >>> print(select(cast("hi there," + foo.c.data, String)))
SELECT CAST(:data_1 + foo.data AS VARCHAR) AS anon_1
FROM foo
expression as these don't render inside of a CAST, will nonetheless make use of
the given name::
- >>> print(select(cast(('hi there,' + foo.c.data).label('hello_data'), String)))
+ >>> print(select(cast(("hi there," + foo.c.data).label("hello_data"), String)))
SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data
FROM foo
And of course as was always the case, :class:`.Label` can be applied to the
expression on the outside to apply an "AS <name>" label directly::
- >>> print(select(cast(('hi there,' + foo.c.data), String).label('hello_data')))
+ >>> print(select(cast(("hi there," + foo.c.data), String).label("hello_data")))
SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data
FROM foo
To ensure that a CREATE CONSTRAINT is emitted for these types, set these
flags to ``True``::
- class Spam(Base):
- __tablename__ = "spam"
- id = Column(Integer, primary_key=True)
- boolean = Column(Boolean(create_constraint=True))
- enum = Column(Enum("a", "b", "c", create_constraint=True))
-
+ class Spam(Base):
+ __tablename__ = "spam"
+ id = Column(Integer, primary_key=True)
+ boolean = Column(Boolean(create_constraint=True))
+ enum = Column(Enum("a", "b", "c", create_constraint=True))
:ticket:`5367`
the attribute::
class Book(Base):
- __tablename__ = 'book'
+ __tablename__ = "book"
book_id = Column(Integer, primary_key=True)
title = Column(String(200), nullable=False)
summary = deferred(Column(String(2000)), raiseload=True)
excerpt = deferred(Column(Text), raiseload=True)
+
book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first()
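Accessing a column that remains deferred with ``raiseload=True`` then raises
rather than silently emitting SQL; a brief sketch (the exact error message may
vary)::

    book_w_excerpt.excerpt  # loaded, thanks to undefer()
    book_w_excerpt.summary  # raises InvalidRequestError instead of lazy loading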
It was originally considered that the existing :func:`.raiseload` option that
attributes. However, this would break the "wildcard" behavior of :func:`.raiseload`,
which is documented as allowing one to prevent all relationships from loading::
- session.query(Order).options(
- joinedload(Order.items), raiseload('*'))
+ session.query(Order).options(joinedload(Order.items), raiseload("*"))
Above, if we had expanded :func:`.raiseload` to accommodate for columns as
well, the wildcard would also prevent columns from loading and thus be a
row._mapping[u1] # same as row[0]
- row = (
- s.query(User.id, Address.email_address)
- .join(User.addresses)
- .first()
- )
+ row = s.query(User.id, Address.email_address).join(User.addresses).first()
row._mapping[User.id] # same as row[0]
row._mapping["id"] # same as row[0]
Session = sessionmaker(engine, future=True)
with Session() as session:
- u1 = User()
- session.add(u1)
-
- a1 = Address()
- a1.user = u1 # <--- will not add "a1" to the Session
-
+ u1 = User()
+ session.add(u1)
+ a1 = Address()
+ a1.user = u1 # <--- will not add "a1" to the Session
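Under this behavior, the dependent object must be placed in the
:class:`.Session` explicitly; a minimal sketch continuing the example::

    session.add(a1)  # now required; assigning the backref no longer cascades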
:ticket:`5150`
relationship, when an expired object is unexpired or an object is refreshed::
>>> a1 = session.query(A).options(joinedload(A.bs)).first()
- >>> a1.data = 'new data'
+ >>> a1.data = "new data"
>>> session.commit()
Above, the ``A`` object was loaded with a ``joinedload()`` option associated
an additional query::
>>> a1 = session.query(A).options(selectinload(A.bs)).first()
- >>> a1.data = 'new data'
+ >>> a1.data = "new data"
>>> session.commit()
>>> a1.data
SELECT a.id AS a_id, a.data AS a_data
harmful, which is when the object is merged into a session::
>>> u1 = User(id=1) # create an empty User to merge with id=1 in the database
- >>> merged1 = session.merge(u1) # value of merged1.addresses is unchanged from that of the DB
+ >>> merged1 = session.merge(
+ ... u1
+ ... ) # value of merged1.addresses is unchanged from that of the DB
- >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database
+ >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database
>>> u2.addresses
[]
>>> merged2 = session.merge(u2) # value of merged2.addresses has been emptied in the DB
>>> u1 = User()
>>> l1 = u1.addresses # new list is created, associated with the state
>>> assert u1.addresses is l1 # you get the same list each time you access it
- >>> assert "addresses" not in u1.__dict__ # but it won't go into __dict__ until it's mutated
+ >>> assert (
+ ... "addresses" not in u1.__dict__
+ ... ) # but it won't go into __dict__ until it's mutated
>>> from sqlalchemy import inspect
>>> inspect(u1).attrs.addresses.history
History(added=None, unchanged=None, deleted=None)
>>> u1.addresses
[]
# this will now fail, would pass before
- >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {"addresses": []}
+ >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {
+ ... "addresses": []
+ ... }
or to ensure that the collection won't require a lazy load to proceed, the
(admittedly awkward) code below will now also fail::
to be inserted has the same primary key as an object that is already present::
class Product(Base):
- __tablename__ = 'product'
+ __tablename__ = "product"
id = Column(Integer, primary_key=True)
+
session = Session(engine)
# add Product with primary key 1
# ...
# this is now an error
- addresses = relationship(
- "Address", viewonly=True, cascade="all, delete-orphan")
+ addresses = relationship("Address", viewonly=True, cascade="all, delete-orphan")
The above will raise::
s.commit()
- print(
- s.query(Manager).select_entity_from(s.query(Employee).subquery()).all()
- )
-
+ print(s.query(Manager).select_entity_from(s.query(Employee).subquery()).all())
The subquery selects both the ``Engineer`` and the ``Manager`` rows, and
even though the outer query is against ``Manager``, we get a non-``Manager``
integer primary key column of a table::
Table(
- "some_table", metadata,
- Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True)
+ "some_table",
+ metadata,
+ Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True),
)
The above :class:`.Sequence` is only used for DDL and INSERT statements if the
Given the example program below::
- from sqlalchemy import column
- from sqlalchemy import create_engine
- from sqlalchemy import select
- from sqlalchemy import table
+ from sqlalchemy import column
+ from sqlalchemy import create_engine
+ from sqlalchemy import select
+ from sqlalchemy import table
- engine = create_engine("sqlite://")
+ engine = create_engine("sqlite://")
- engine.execute("CREATE TABLE foo (id integer)")
- engine.execute("INSERT INTO foo (id) VALUES (1)")
+ engine.execute("CREATE TABLE foo (id integer)")
+ engine.execute("INSERT INTO foo (id) VALUES (1)")
- foo = table("foo", column("id"))
- result = engine.execute(select([foo.c.id]))
+ foo = table("foo", column("id"))
+ result = engine.execute(select([foo.c.id]))
- print(result.fetchall())
+ print(result.fetchall())
The above program uses several patterns that many users will already identify
as "legacy", namely the use of the :meth:`_engine.Engine.execute` method
With the above guidance, we can migrate our program to use 2.0 styles, and
as a bonus our program is much clearer::
- from sqlalchemy import column
- from sqlalchemy import create_engine
- from sqlalchemy import select
- from sqlalchemy import table
- from sqlalchemy import text
-
+ from sqlalchemy import column
+ from sqlalchemy import create_engine
+ from sqlalchemy import select
+ from sqlalchemy import table
+ from sqlalchemy import text
- engine = create_engine("sqlite://")
- # don't rely on autocommit for DML and DDL
- with engine.begin() as connection:
- # use connection.execute(), not engine.execute()
- # use the text() construct to execute textual SQL
- connection.execute(text("CREATE TABLE foo (id integer)"))
- connection.execute(text("INSERT INTO foo (id) VALUES (1)"))
+ engine = create_engine("sqlite://")
+ # don't rely on autocommit for DML and DDL
+ with engine.begin() as connection:
+ # use connection.execute(), not engine.execute()
+ # use the text() construct to execute textual SQL
+ connection.execute(text("CREATE TABLE foo (id integer)"))
+ connection.execute(text("INSERT INTO foo (id) VALUES (1)"))
- foo = table("foo", column("id"))
- with engine.connect() as connection:
- # use connection.execute(), not engine.execute()
- # select() now accepts column / table expressions positionally
- result = connection.execute(select(foo.c.id))
+ foo = table("foo", column("id"))
- print(result.fetchall())
+ with engine.connect() as connection:
+ # use connection.execute(), not engine.execute()
+ # select() now accepts column / table expressions positionally
+ result = connection.execute(select(foo.c.id))
+ print(result.fetchall())
The goal of "2.0 deprecations mode" is that a program which runs with no
:class:`_exc.RemovedIn20Warning` warnings with "2.0 deprecations mode" turned
conn.commit() # commit as you go
-
-
Migration to 2.0 Step Five - Use the ``future`` flag on Session
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:class:`_orm.Session` may be used as a context manager::
from sqlalchemy.orm import Session
+
with Session(engine) as session:
session.add(MyObject())
session.commit()
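A configured :class:`.sessionmaker` supports the same pattern, including a
variant that frames the block in a transaction; a sketch assuming an ordinary
factory::

    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker(engine)

    with Session.begin() as session:
        session.add(MyObject())
    # commits on success, rolls back on exception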
conn = engine.connect()
# won't autocommit in 2.0
- conn.execute(some_table.insert().values(foo='bar'))
+ conn.execute(some_table.insert().values(foo="bar"))
Nor will this autocommit::
conn = engine.connect()
# won't autocommit in 2.0
- conn.execute(
- text("EXEC my_procedural_thing()").execution_options(autocommit=True)
- )
-
+ conn.execute(text("EXEC my_procedural_thing()").execution_options(autocommit=True))
**Migration to 2.0**
or the :meth:`_engine.Engine.begin` context manager::
with engine.begin() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
- conn.execute(some_other_table.insert().values(bat='hoho'))
+ conn.execute(some_table.insert().values(foo="bar"))
+ conn.execute(some_other_table.insert().values(bat="hoho"))
with engine.connect() as conn:
with conn.begin():
- conn.execute(some_table.insert().values(foo='bar'))
- conn.execute(some_other_table.insert().values(bat='hoho'))
+ conn.execute(some_table.insert().values(foo="bar"))
+ conn.execute(some_other_table.insert().values(bat="hoho"))
with engine.begin() as conn:
conn.execute(text("EXEC my_procedural_thing()"))
:meth:`_future.Connection.begin`::
with engine.connect() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
- conn.execute(some_other_table.insert().values(bat='hoho'))
+ conn.execute(some_table.insert().values(foo="bar"))
+ conn.execute(some_other_table.insert().values(bat="hoho"))
conn.commit()
of Core use cases, it's the pattern that is already recommended::
with engine.begin() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
+ conn.execute(some_table.insert().values(foo="bar"))
For "commit as you go, or rollback instead" usage, which resembles how the
:class:`_orm.Session` is normally used today, the "future" version of
engine = create_engine(..., future=True)
with engine.connect() as conn:
- conn.execute(some_table.insert().values(foo='bar'))
+ conn.execute(some_table.insert().values(foo="bar"))
conn.commit()
conn.execute(text("some other SQL"))
metadata_obj = MetaData(bind=engine) # no longer supported
- metadata_obj.create_all() # requires Engine or Connection
+ metadata_obj.create_all() # requires Engine or Connection
metadata_obj.reflect() # requires Engine or Connection
- t = Table('t', metadata_obj, autoload=True) # use autoload_with=engine
+ t = Table("t", metadata_obj, autoload=True) # use autoload_with=engine
result = engine.execute(t.select()) # no longer supported
metadata_obj.reflect(engine)
# reflect individual table
- t = Table('t', metadata_obj, autoload_with=engine)
+ t = Table("t", metadata_obj, autoload_with=engine)
# connection level:
metadata_obj.reflect(connection)
# reflect individual table
- t = Table('t', metadata_obj, autoload_with=connection)
+ t = Table("t", metadata_obj, autoload_with=connection)
# execute SQL statements
result = conn.execute(t.select())
-
**Discussion**
to finally reduce the number of choices for how to execute a statement in
Core from "many choices"::
- # many choices
+ # many choices
- # bound metadata?
- metadata_obj = MetaData(engine)
+ # bound metadata?
+ metadata_obj = MetaData(engine)
- # or not?
- metadata_obj = MetaData()
+ # or not?
+ metadata_obj = MetaData()
- # execute from engine?
- result = engine.execute(stmt)
+ # execute from engine?
+ result = engine.execute(stmt)
- # or execute the statement itself (but only if you did
- # "bound metadata" above, which means you can't get rid of "bound" if any
- # part of your program uses this form)
- result = stmt.execute()
+ # or execute the statement itself (but only if you did
+ # "bound metadata" above, which means you can't get rid of "bound" if any
+ # part of your program uses this form)
+ result = stmt.execute()
- # execute from connection, but it autocommits?
- conn = engine.connect()
- conn.execute(stmt)
+ # execute from connection, but it autocommits?
+ conn = engine.connect()
+ conn.execute(stmt)
- # execute from connection, but autocommit isn't working, so use the special
- # option?
- conn.execution_options(autocommit=True).execute(stmt)
+ # execute from connection, but autocommit isn't working, so use the special
+ # option?
+ conn.execution_options(autocommit=True).execute(stmt)
- # or on the statement ?!
- conn.execute(stmt.execution_options(autocommit=True))
+ # or on the statement ?!
+ conn.execute(stmt.execution_options(autocommit=True))
- # or execute from connection, and we use explicit transaction?
- with conn.begin():
- conn.execute(stmt)
+ # or execute from connection, and we use explicit transaction?
+ with conn.begin():
+ conn.execute(stmt)
to "one choice", where by "one choice" we mean "explicit connection with
explicit transaction"; there are still a few ways to demarcate
:class:`_engine.Connection` and then to explicitly demarcate the transaction,
in the case that the operation is a write operation::
- # one choice - work with explicit connection, explicit transaction
- # (there remain a few variants on how to demarcate the transaction)
-
- # "begin once" - one transaction only per checkout
- with engine.begin() as conn:
- result = conn.execute(stmt)
+ # one choice - work with explicit connection, explicit transaction
+ # (there remain a few variants on how to demarcate the transaction)
- # "commit as you go" - zero or more commits per checkout
- with engine.connect() as conn:
- result = conn.execute(stmt)
- conn.commit()
+ # "begin once" - one transaction only per checkout
+ with engine.begin() as conn:
+ result = conn.execute(stmt)
- # "commit as you go" but with a transaction block instead of autobegin
- with engine.connect() as conn:
- with conn.begin():
- result = conn.execute(stmt)
+ # "commit as you go" - zero or more commits per checkout
+ with engine.connect() as conn:
+ result = conn.execute(stmt)
+ conn.commit()
+ # "commit as you go" but with a transaction block instead of autobegin
+ with engine.connect() as conn:
+ with conn.begin():
+ result = conn.execute(stmt)
execute() method more strict, execution options are more prominent
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
require modification::
- connection = engine.connect()
-
- # direct string SQL not supported; use text() or exec_driver_sql() method
- result = connection.execute("select * from table")
+ connection = engine.connect()
- # positional parameters no longer supported, only named
- # unless using exec_driver_sql()
- result = connection.execute(table.insert(), ('x', 'y', 'z'))
+ # direct string SQL not supported; use text() or exec_driver_sql() method
+ result = connection.execute("select * from table")
- # **kwargs no longer accepted, pass a single dictionary
- result = connection.execute(table.insert(), x=10, y=5)
+ # positional parameters no longer supported, only named
+ # unless using exec_driver_sql()
+ result = connection.execute(table.insert(), ("x", "y", "z"))
- # multiple *args no longer accepted, pass a list
- result = connection.execute(
- table.insert(),
- {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}
- )
+ # **kwargs no longer accepted, pass a single dictionary
+ result = connection.execute(table.insert(), x=10, y=5)
+ # multiple *args no longer accepted, pass a list
+ result = connection.execute(
+ table.insert(), {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}
+ )
**Migration to 2.0**
method, so the following code is cross-compatible between 1.x and 2.0::
- connection = engine.connect()
-
- from sqlalchemy import text
- result = connection.execute(text("select * from table"))
+ connection = engine.connect()
- # pass a single dictionary for single statement execution
- result = connection.execute(table.insert(), {"x": 10, "y": 5})
+ from sqlalchemy import text
- # pass a list of dictionaries for executemany
- result = connection.execute(
- table.insert(),
- [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}]
- )
+ result = connection.execute(text("select * from table"))
+ # pass a single dictionary for single statement execution
+ result = connection.execute(table.insert(), {"x": 10, "y": 5})
+ # pass a list of dictionaries for executemany
+ result = connection.execute(
+ table.insert(), [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}]
+ )
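For driver-level string SQL, the :meth:`_engine.Connection.exec_driver_sql`
method mentioned above bypasses the :func:`_sql.text` construct entirely; a
short sketch (the qmark paramstyle shown is driver-dependent)::

    result = connection.exec_driver_sql(
        "INSERT INTO table (x, y) VALUES (?, ?)", (10, 5)
    )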
**Discussion**
row = result.first() # suppose the row is (1, 2)
- "x" in row # evaluates to False, in 1.x / future=False, this would be True
+ "x" in row # evaluates to False, in 1.x / future=False, this would be True
1 in row # evaluates to True, in 1.x / future=False, this would be False
-
**Migration to 2.0**
Application code or test suites that are testing for a particular key
stmt = select(User, Address).join(User.addresses)
for row in session.execute(stmt).mappings():
- print("the user is: %s the address is: %s" % (
- row[User],
- row[Address]
- ))
+ print("the user is: %s the address is: %s" % (row[User], row[Address]))
.. seealso::
# list emits a deprecation warning
case_clause = case(
- [
- (table.c.x == 5, "five"),
- (table.c.x == 7, "seven")
- ],
- else_="neither five nor seven"
+ [(table.c.x == 5, "five"), (table.c.x == 7, "seven")],
+ else_="neither five nor seven",
)
-
**Migration to 2.0**
Only the "generative" style of :func:`_sql.select` will be supported. The list
# case conditions passed positionally
case_clause = case(
- (table.c.x == 5, "five"),
- (table.c.x == 7, "seven"),
- else_="neither five nor seven"
+ (table.c.x == 5, "five"), (table.c.x == 7, "seven"), else_="neither five nor seven"
)
**Discussion**
Examples of "structural" vs. "data" elements are as follows::
- # table columns for CREATE TABLE - structural
- table = Table("table", metadata_obj, Column('x', Integer), Column('y', Integer))
+ # table columns for CREATE TABLE - structural
+ table = Table("table", metadata_obj, Column("x", Integer), Column("y", Integer))
- # columns in a SELECT statement - structural
- stmt = select(table.c.x, table.c.y)
+ # columns in a SELECT statement - structural
+ stmt = select(table.c.x, table.c.y)
- # literal elements in an IN clause - data
- stmt = stmt.where(table.c.y.in_([1, 2, 3]))
+ # literal elements in an IN clause - data
+ stmt = stmt.where(table.c.y.in_([1, 2, 3]))
.. seealso::
stmt = table.delete(table.c.x > 15)
# no longer supported
- stmt = table.update(
- table.c.x < 15,
- preserve_parameter_order=True
- ).values(
+ stmt = table.update(table.c.x < 15, preserve_parameter_order=True).values(
[(table.c.y, 20), (table.c.x, table.c.y + 10)]
)
stmt = table.delete().where(table.c.x > 15)
# use generative methods, ordered_values() replaces preserve_parameter_order
- stmt = table.update().where(
- table.c.x < 15,
- ).ordered_values(
- (table.c.y, 20), (table.c.x, table.c.y + 10)
+ stmt = (
+ table.update()
+ .where(
+ table.c.x < 15,
+ )
+ .ordered_values((table.c.y, 20), (table.c.x, table.c.y + 10))
)
**Discussion**
from sqlalchemy.orm import mapper
- mapper(SomeClass, some_table, properties={
- "related": relationship(SomeRelatedClass)
- })
+ mapper(SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)})
To work from a central :class:`_orm.registry` object::
mapper_reg = registry()
- mapper_reg.map_imperatively(SomeClass, some_table, properties={
- "related": relationship(SomeRelatedClass)
- })
+ mapper_reg.map_imperatively(
+ SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)}
+ )
The above :class:`_orm.registry` is also the source for declarative mappings,
and classical mappings now have access to this registry including string-based
Base = mapper_reg.generate_base()
+
class SomeRelatedClass(Base):
- __tablename__ = 'related'
+ __tablename__ = "related"
# ...
- mapper_reg.map_imperatively(SomeClass, some_table, properties={
- "related": relationship(
- "SomeRelatedClass",
- primaryjoin="SomeRelatedClass.related_id == SomeClass.id"
- )
- })
-
+ mapper_reg.map_imperatively(
+ SomeClass,
+ some_table,
+ properties={
+ "related": relationship(
+ "SomeRelatedClass",
+ primaryjoin="SomeRelatedClass.related_id == SomeClass.id",
+ )
+ },
+ )
**Discussion**
# string use removed
q = session.query(Address).filter(with_parent(u1, "addresses"))
-
**Migration to 2.0**
Modern SQLAlchemy 1.x versions support the recommended technique which
# chaining removed
q = session.query(User).join("orders", "items", "keywords")
-
**Migration to 2.0**
Use individual calls to :meth:`_orm.Query.join` for 1.x / 2.0 cross compatible
The ``aliased=True`` option on :meth:`_query.Query.join` is removed, as is
the ``from_joinpoint`` flag::
- # no longer supported
- q = session.query(Node).\
- join("children", aliased=True).filter(Node.name == "some sub child").
- join("children", from_joinpoint=True, aliased=True).\
- filter(Node.name == 'some sub sub child')
+ # no longer supported
+ q = (
+ session.query(Node)
+ .join("children", aliased=True)
+ .filter(Node.name == "some sub child")
+ .join("children", from_joinpoint=True, aliased=True)
+ .filter(Node.name == "some sub sub child")
+ )
**Migration to 2.0**
Use explicit aliases instead::
- n1 = aliased(Node)
- n2 = aliased(Node)
-
- q = select(Node).join(Node.children.of_type(n1)).\
- where(n1.name == "some sub child").\
- join(n1.children.of_type(n2)).\
- where(n2.name == "some sub child")
+ n1 = aliased(Node)
+ n2 = aliased(Node)
+ q = (
+ select(Node)
+ .join(Node.children.of_type(n1))
+ .where(n1.name == "some sub child")
+ .join(n1.children.of_type(n2))
+ .where(n2.name == "some sub child")
+ )
**Discussion**
# 1.xx code
- result = session.query(User).join(User.addresses).\
- distinct().order_by(Address.email_address).all()
+ result = (
+ session.query(User)
+ .join(User.addresses)
+ .distinct()
+ .order_by(Address.email_address)
+ .all()
+ )
In version 2.0, the "email_address" column will not be automatically added
to the columns clause, and the above query will fail, since relational
# 1.4 / 2.0 code
- stmt = select(User, Address.email_address).join(User.addresses).\
- distinct().order_by(Address.email_address)
+ stmt = (
+ select(User, Address.email_address)
+ .join(User.addresses)
+ .distinct()
+ .order_by(Address.email_address)
+ )
result = session.execute(stmt).columns(User).all()
The :meth:`_orm.Query.from_self` method will be removed from :class:`_orm.Query`::
# from_self is removed
- q = session.query(User, Address.email_address).\
- join(User.addresses).\
- from_self(User).order_by(Address.email_address)
-
+ q = (
+ session.query(User, Address.email_address)
+ .join(User.addresses)
+ .from_self(User)
+ .order_by(Address.email_address)
+ )
**Migration to 2.0**
from sqlalchemy.orm import aliased
- subq = session.query(User, Address.email_address).\
- join(User.addresses).subquery()
+ subq = session.query(User, Address.email_address).join(User.addresses).subquery()
ua = aliased(User, subq)
from sqlalchemy.orm import aliased
- subq = select(User, Address.email_address).\
- join(User.addresses).subquery()
+ subq = select(User, Address.email_address).join(User.addresses).subquery()
ua = aliased(User, subq)
result = session.execute(stmt)
-
**Discussion**
The :meth:`_query.Query.from_self` method is a very complicated method that is rarely
select from both entities at once without having to specify any particular
labeling::
- # 1.4 / 2.0 code
+ # 1.4 / 2.0 code
- subq = select(User, Address).\
- join(User.addresses).subquery()
+ subq = select(User, Address).join(User.addresses).subquery()
- ua = aliased(User, subq)
- aa = aliased(Address, subq)
+ ua = aliased(User, subq)
+ aa = aliased(Address, subq)
- stmt = select(ua, aa).order_by(aa.email_address)
- result = session.execute(stmt)
+ stmt = select(ua, aa).order_by(aa.email_address)
+ result = session.execute(stmt)
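Each row of this result then carries both entities; a short sketch::

    for user, address in result:
        print(user.id, address.id)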
The above query will disambiguate the ``.id`` column of ``User`` and
``Address``, where ``Address.id`` is rendered and tracked as ``id_1``::
# In the new API, uniquing is available but not implicitly
# enabled
- result = session.execute(
- select(User).options(joinedload(User.addresses))
- )
+ result = session.execute(select(User).options(joinedload(User.addresses)))
# this actually will raise an error to let the user know that
# uniquing should be applied
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
posts = relationship(Post, lazy="dynamic")
+
jack = session.get(User, 5)
# filter Jack's blog posts
- posts = session.scalars(
- jack.posts.statement.where(Post.headline == "this is a post")
- )
+ posts = session.scalars(jack.posts.statement.where(Post.headline == "this is a post"))
* Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select`
construct directly::
jack = session.get(User, 5)
posts = session.scalars(
- select(Post).
- where(with_parent(jack, User.posts)).
- where(Post.headline == "this is a post")
+ select(Post)
+ .where(with_parent(jack, User.posts))
+ .where(Post.headline == "this is a post")
)
**Discussion**
# commits, won't be supported
sess.flush()
-
**Migration to 2.0**
The main reason a :class:`_orm.Session` is used in "autocommit" mode
sess = Session(engine)
sess.begin() # begin explicitly; if not called, will autobegin
- # when database access is needed
+ # when database access is needed
sess.add(obj)
import contextlib
+
@contextlib.contextmanager
def transaction(session):
if not session.in_transaction():
else:
yield
-
The above context manager may be used in the same way the
"subtransaction" flag works, such as in the following example::
with transaction(session):
method_b(session)
+
# method_b also starts a transaction, but when
# called from method_a participates in the ongoing
# transaction.
def method_b(session):
with transaction(session):
- session.add(SomeObject('bat', 'lala'))
+ session.add(SomeObject("bat", "lala"))
+
Session = sessionmaker(engine)
def method_a(session):
method_b(session)
+
def method_b(session):
- session.add(SomeObject('bat', 'lala'))
+ session.add(SomeObject("bat", "lala"))
+
Session = sessionmaker(engine)
Database via Azure Active Directory", which apparently lacks the
``system_views`` view entirely. Error catching has been extended so that under
no circumstances will this method ever fail, provided database connectivity
- is present.
\ No newline at end of file
+ is present.
combinations of SQL label names and aliasing. This "wrapping" is not
appropriate for :func:`_orm.contains_eager` which has always had the
contract that the user-defined SQL statement is unmodified with the
- exception of adding the appropriate columns to be fetched.
\ No newline at end of file
+ exception of adding the appropriate columns to be fetched.
:meth:`_sql.ColumnOperators.contains`,
:meth:`_sql.ColumnOperators.startswith`, etc. Huge thanks to Matias
Martinez Rebori for their meticulous and complete efforts in implementing
- these new methods.
\ No newline at end of file
+ these new methods.
.. seealso::
- :ref:`change_4926`
\ No newline at end of file
+ :ref:`change_4926`
.. seealso::
- :ref:`change_5465_oracle`
\ No newline at end of file
+ :ref:`change_5465_oracle`
.. seealso::
- :ref:`postgresql_ranges`
\ No newline at end of file
+ :ref:`postgresql_ranges`
dialect dependent, with PostgreSQL, MySQL/MariaDB and SQLite supporting it,
and Oracle and SQL Server not supporting it. Third party dialects should
also seek to ensure their :meth:`_engine.Inspector.has_table` method
- searches for views as well as tables for the given name.
\ No newline at end of file
+ searches for views as well as tables for the given name.
* Removed the very old "dbapi_proxy" module, which in very early
SQLAlchemy releases was used to provide a transparent connection pool
- over a raw DBAPI connection.
\ No newline at end of file
+ over a raw DBAPI connection.
as configured with the :class:`_types.Numeric`, :class:`_types.Float`, and
related datatypes, just without the ability to maintain precision beyond 15
significant digits when using SQLite, unless alternate representations such
- as strings are used.
\ No newline at end of file
+ as strings are used.
:class:`_postgresql.ENUM` datatype is now a required keyword argument. The
"name" is necessary in any case in order for the :class:`_postgresql.ENUM`
to be usable as an error would be raised at SQL/DDL render time if "name"
- were not present.
\ No newline at end of file
+ were not present.
.. seealso::
- :ref:`change_7433`
\ No newline at end of file
+ :ref:`change_7433`
.. seealso::
- :ref:`ticket_7631`
\ No newline at end of file
+ :ref:`ticket_7631`
some scenarios as it allows the nesting attribute to be set simultaneously
along with the explicit level of the CTE.
- The :meth:`.HasCTE.add_cte` method also accepts multiple CTE objects.
\ No newline at end of file
+ The :meth:`.HasCTE.add_cte` method also accepts multiple CTE objects.
This phrase is not accepted by all databases and the operation will fail
on a database that does not support it as there is no similarly compatible
fallback within the scope of a single DDL statement.
- Pull request courtesy Mike Fiedler.
\ No newline at end of file
+ Pull request courtesy Mike Fiedler.
.. seealso::
- :ref:`postgresql_json_types` - PostgreSQL JSON types.
\ No newline at end of file
+ :ref:`postgresql_json_types` - PostgreSQL JSON types.
False, which leaves the previous behavior unchanged; this is to support
existing code that makes explicit use of these attributes in queries.
To migrate to the newer approach, apply explicit attributes to the abstract
- base class as needed.
\ No newline at end of file
+ base class as needed.
unnecessary use of a prepared statement for this query has been fixed.
The rationale is to eliminate the need for PostgreSQL to produce a query plan
when the ping is emitted. The operation is not currently supported by the
- ``psycopg2`` driver which continues to use ``SELECT 1``.
\ No newline at end of file
+ ``psycopg2`` driver which continues to use ``SELECT 1``.
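+
+ A minimal sketch of enabling the pre-ping behavior discussed here (URL and
+ driver choice are illustrative)::
+
+     from sqlalchemy import create_engine
+
+     engine = create_engine(
+         "postgresql+psycopg://scott:tiger@localhost/test", pool_pre_ping=True
+     )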
Additionally, classes mapped by :class:`_orm.composite` now support
ordering comparison operations, e.g. ``<``, ``>=``, etc.
- See the new documentation at :ref:`mapper_composite` for examples.
\ No newline at end of file
+ See the new documentation at :ref:`mapper_composite` for examples.
:func:`_orm.declared_attr` to achieve this mapping; the
:class:`_schema.ForeignKey` object is copied along with the
:class:`_schema.Column` itself when the column is applied to the declared
- mapping.
\ No newline at end of file
+ mapping.
value. The :meth:`.Connection.scalar` method should be used instead, which
has been reworked with new internal codepaths to suit invoking a SELECT for
default generation objects without going through the
- :meth:`.Connection.execute` method.
\ No newline at end of file
+ :meth:`.Connection.execute` method.
contract of this method, which is that it can iterate objects through
arbitrary result sets, is long obsolete and no longer tested.
Arbitrary statements can return objects by using constructs such
- as :meth`.Select.from_statement` or :func:`_orm.aliased`.
\ No newline at end of file
+ as :meth:`.Select.from_statement` or :func:`_orm.aliased`.
:func:`_orm.synonym` function
* :class:`_orm.CompositeProperty` becomes an alias for the primary name
:class:`_orm.Composite`, constructed as always from the
- :func:`_orm.composite` function
\ No newline at end of file
+ :func:`_orm.composite` function
.. seealso::
- :ref:`external_toplevel`
\ No newline at end of file
+ :ref:`external_toplevel`
stmt = select(str_col, int_col)
# (variable) stmt: ReturningInsert[Tuple[str, int]]
- ins_stmt = insert(table('t')).returning(str_col, int_col)
+ ins_stmt = insert(table("t")).returning(str_col, int_col)
* The ``Tuple[]`` type from any row returning construct, when invoked with an
``.execute()`` method, carries through to :class:`_engine.Result`
row = result.first()
if row is not None:
- # for typed tuple unpacking or indexed access,
- # use row.tuple() or row.t (this is the small typing-oriented accessor)
- strval, intval = row.t
+ # for typed tuple unpacking or indexed access,
+ # use row.tuple() or row.t (this is the small typing-oriented accessor)
+ strval, intval = row.t
- # (variable) strval: str
- strval
+ # (variable) strval: str
+ strval
- # (variable) intval: int
- intval
+ # (variable) intval: int
+ intval
* Scalar values for single-column statements do the right thing with
methods like :meth:`_engine.Connection.scalar`, :meth:`_engine.Result.scalars`,
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
be swapped directly for the use of :class:`_schema.Column`. Given a
1.x style mapping as::
- from sqlalchemy import Column
- from sqlalchemy.orm import relationship
- from sqlalchemy.orm import DeclarativeBase
+ from sqlalchemy import Column
+ from sqlalchemy.orm import relationship
+ from sqlalchemy.orm import DeclarativeBase
- class Base(DeclarativeBase):
- pass
- class User(Base):
- __tablename__ = 'user_account'
+ class Base(DeclarativeBase):
+ pass
+
- id = Column(Integer, primary_key=True)
- name = Column(String(30), nullable=False)
- fullname = Column(String)
- addresses = relationship("Address", back_populates="user")
+ class User(Base):
+ __tablename__ = "user_account"
- class Address(Base):
- __tablename__ = "address"
+ id = Column(Integer, primary_key=True)
+ name = Column(String(30), nullable=False)
+ fullname = Column(String)
+ addresses = relationship("Address", back_populates="user")
- id = Column(Integer, primary_key=True)
- email_address = Column(String, nullable=False)
- user_id = Column(ForeignKey("user_account.id"), nullable=False)
- user = relationship("User", back_populates="addresses")
+
+ class Address(Base):
+ __tablename__ = "address"
+
+ id = Column(Integer, primary_key=True)
+ email_address = Column(String, nullable=False)
+ user_id = Column(ForeignKey("user_account.id"), nullable=False)
+ user = relationship("User", back_populates="addresses")
We replace :class:`_schema.Column` with :func:`_orm.mapped_column`; no
arguments need to change::
- from sqlalchemy.orm import DeclarativeBase
- from sqlalchemy.orm import mapped_column
- from sqlalchemy.orm import relationship
+ from sqlalchemy.orm import DeclarativeBase
+ from sqlalchemy.orm import mapped_column
+ from sqlalchemy.orm import relationship
+
+
+ class Base(DeclarativeBase):
+ pass
- class Base(DeclarativeBase):
- pass
- class User(Base):
- __tablename__ = 'user_account'
+ class User(Base):
+ __tablename__ = "user_account"
- id = mapped_column(Integer, primary_key=True)
- name = mapped_column(String(30), nullable=False)
- fullname = mapped_column(String)
- addresses = relationship("Address", back_populates="user")
+ id = mapped_column(Integer, primary_key=True)
+ name = mapped_column(String(30), nullable=False)
+ fullname = mapped_column(String)
+ addresses = relationship("Address", back_populates="user")
- class Address(Base):
- __tablename__ = "address"
- id = mapped_column(Integer, primary_key=True)
- email_address = mapped_column(String, nullable=False)
- user_id = mapped_column(ForeignKey("user_account.id"), nullable=False)
- user = relationship("User", back_populates="addresses")
+ class Address(Base):
+ __tablename__ = "address"
+
+ id = mapped_column(Integer, primary_key=True)
+ email_address = mapped_column(String, nullable=False)
+ user_id = mapped_column(ForeignKey("user_account.id"), nullable=False)
+ user = relationship("User", back_populates="addresses")
The individual columns above are **not yet typed with Python types**,
and are instead typed as ``Mapped[Any]``; this is because we can declare any
will be more verbose; however, with proficiency, this step can
be combined with subsequent steps to update mappings more directly::
- from typing import List
- from typing import Optional
- from sqlalchemy.orm import DeclarativeBase
- from sqlalchemy.orm import Mapped
- from sqlalchemy.orm import mapped_column
- from sqlalchemy.orm import relationship
+ from typing import List
+ from typing import Optional
+ from sqlalchemy.orm import DeclarativeBase
+ from sqlalchemy.orm import Mapped
+ from sqlalchemy.orm import mapped_column
+ from sqlalchemy.orm import relationship
- class Base(DeclarativeBase):
- pass
- class User(Base):
- __tablename__ = 'user_account'
+ class Base(DeclarativeBase):
+ pass
+
+
+ class User(Base):
+ __tablename__ = "user_account"
- id: Mapped[int] = mapped_column(Integer, primary_key=True)
- name: Mapped[str] = mapped_column(String(30), nullable=False)
- fullname: Mapped[Optional[str]] = mapped_column(String)
- addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user")
+ id: Mapped[int] = mapped_column(Integer, primary_key=True)
+ name: Mapped[str] = mapped_column(String(30), nullable=False)
+ fullname: Mapped[Optional[str]] = mapped_column(String)
+ addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user")
- class Address(Base):
- __tablename__ = "address"
- id: Mapped[int] = mapped_column(Integer, primary_key=True)
- email_address: Mapped[str] = mapped_column(String, nullable=False)
- user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"), nullable=False)
- user: Mapped["User"] = relationship("User", back_populates="addresses")
+ class Address(Base):
+ __tablename__ = "address"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True)
+ email_address: Mapped[str] = mapped_column(String, nullable=False)
+ user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"), nullable=False)
+ user: Mapped["User"] = relationship("User", back_populates="addresses")
At this point, our ORM mapping is fully typed and will produce exact-typed
:func:`_sql.select`, :class:`_orm.Query` and :class:`_engine.Result`
(as :func:`_orm.relationship` has supported string-based forward references
for ten years already ;) )::
- from typing import List
- from typing import Optional
- from sqlalchemy.orm import DeclarativeBase
- from sqlalchemy.orm import Mapped
- from sqlalchemy.orm import mapped_column
- from sqlalchemy.orm import relationship
+ from typing import List
+ from typing import Optional
+ from sqlalchemy.orm import DeclarativeBase
+ from sqlalchemy.orm import Mapped
+ from sqlalchemy.orm import mapped_column
+ from sqlalchemy.orm import relationship
- class Base(DeclarativeBase):
- pass
- class User(Base):
- __tablename__ = 'user_account'
+ class Base(DeclarativeBase):
+ pass
- id: Mapped[int] = mapped_column(primary_key=True)
- name: Mapped[str] = mapped_column(String(30))
- fullname: Mapped[Optional[str]]
- addresses: Mapped[List["Address"]] = relationship(back_populates="user")
- class Address(Base):
- __tablename__ = "address"
+ class User(Base):
+ __tablename__ = "user_account"
+
+ id: Mapped[int] = mapped_column(primary_key=True)
+ name: Mapped[str] = mapped_column(String(30))
+ fullname: Mapped[Optional[str]]
+ addresses: Mapped[List["Address"]] = relationship(back_populates="user")
- id: Mapped[int] = mapped_column(primary_key=True)
- email_address: Mapped[str]
- user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
- user: Mapped["User"] = relationship(back_populates="addresses")
+ class Address(Base):
+ __tablename__ = "address"
+
+ id: Mapped[int] = mapped_column(primary_key=True)
+ email_address: Mapped[str]
+ user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
+ user: Mapped["User"] = relationship(back_populates="addresses")
Step five - make use of pep-593 ``Annotated`` to package common directives into types
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
example below introduces additional ``Annotated`` types alongside our
``str50`` example to illustrate this feature::
- from typing_extensions import Annotated
- from typing import List
- from typing import Optional
- from sqlalchemy import ForeignKey
- from sqlalchemy import String
- from sqlalchemy.orm import DeclarativeBase
- from sqlalchemy.orm import Mapped
- from sqlalchemy.orm import mapped_column
- from sqlalchemy.orm import relationship
+ from typing_extensions import Annotated
+ from typing import List
+ from typing import Optional
+ from sqlalchemy import ForeignKey
+ from sqlalchemy import String
+ from sqlalchemy.orm import DeclarativeBase
+ from sqlalchemy.orm import Mapped
+ from sqlalchemy.orm import mapped_column
+ from sqlalchemy.orm import relationship
- # declarative base from previous example
- str50 = Annotated[str, 50]
+ # declarative base from previous example
+ str50 = Annotated[str, 50]
- class Base(DeclarativeBase):
- registry = registry(type_annotation_map={
- str50: String(50),
- })
- # set up mapped_column() overrides, using whole column styles that are
- # expected to be used in multiple places
- intpk = Annotated[int, mapped_column(primary_key=True)]
- user_fk = Annotated[int, mapped_column(ForeignKey('user_account.id'))]
+ class Base(DeclarativeBase):
+ registry = registry(
+ type_annotation_map={
+ str50: String(50),
+ }
+ )
+
+ # set up mapped_column() overrides, using whole column styles that are
+ # expected to be used in multiple places
+ intpk = Annotated[int, mapped_column(primary_key=True)]
+ user_fk = Annotated[int, mapped_column(ForeignKey("user_account.id"))]
- class User(Base):
- __tablename__ = 'user_account'
- id: Mapped[intpk]
- name: Mapped[str50]
- fullname: Mapped[Optional[str]]
- addresses: Mapped[List["Address"]] = relationship(back_populates="user")
+ class User(Base):
+ __tablename__ = "user_account"
- class Address(Base):
- __tablename__ = "address"
+ id: Mapped[intpk]
+ name: Mapped[str50]
+ fullname: Mapped[Optional[str]]
+ addresses: Mapped[List["Address"]] = relationship(back_populates="user")
- id: Mapped[intpk]
- email_address: Mapped[str50]
- user_id: Mapped[user_fk]
- user: Mapped["User"] = relationship(back_populates="addresses")
+
+ class Address(Base):
+ __tablename__ = "address"
+
+ id: Mapped[intpk]
+ email_address: Mapped[str50]
+ user_id: Mapped[user_fk]
+ user: Mapped["User"] = relationship(back_populates="addresses")
Above, columns that are mapped with ``Mapped[str50]``, ``Mapped[intpk]``,
or ``Mapped[user_fk]`` draw from both the
class Base(MappedAsDataclass, DeclarativeBase):
"""subclasses will be converted to dataclasses"""
+
intpk = Annotated[int, mapped_column(primary_key=True)]
str30 = Annotated[str, mapped_column(String(30))]
user_fk = Annotated[int, mapped_column(ForeignKey("user_account.id"))]
id: Mapped[intpk] = mapped_column(init=False)
email_address: Mapped[str]
user_id: Mapped[user_fk] = mapped_column(init=False)
- user: Mapped["User"] = relationship(
- back_populates="addresses", default=None
- )
+ user: Mapped["User"] = relationship(back_populates="addresses", default=None)
The above mapping has used the ``@dataclasses.dataclass`` decorator directly
on each mapped class at the same time that the declarative mapping was
>>> users = session.scalars(
... insert(User).returning(User),
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
+ ... {"name": "sandy", "fullname": "Sandy Cheeks"},
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... {"name": "squidward", "fullname": "Squidward Tentacles"},
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
+ ... ],
... )
>>> print(users.all())
[User(name='spongebob', fullname='Spongebob Squarepants'),
>>> session.execute(
... update(User),
... [
- ... {"id": 1, "fullname": "Spongebob Squarepants"},
- ... {"id": 3, "fullname": "Patrick Star"},
- ... ]
+ ... {"id": 1, "fullname": "Spongebob Squarepants"},
+ ... {"id": 3, "fullname": "Patrick Star"},
+ ... ],
... )
.. seealso::
>>> from sqlalchemy.dialects.sqlite import insert as sqlite_upsert
>>> stmt = sqlite_upsert(User).values(
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
+ ... {"name": "sandy", "fullname": "Sandy Cheeks"},
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... {"name": "squidward", "fullname": "Squidward Tentacles"},
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
+ ... ]
... )
>>> stmt = stmt.on_conflict_do_update(
- ... index_elements=[User.name],
- ... set_=dict(fullname=stmt.excluded.fullname)
+ ... index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname)
... )
>>> result = session.scalars(stmt.returning(User))
>>> print(result.all())
>>> from sqlalchemy import update
>>> stmt = (
- ... update(User).
- ... where(User.name == "squidward").
- ... values(name="spongebob").
- ... returning(User)
+ ... update(User)
+ ... .where(User.name == "squidward")
+ ... .values(name="spongebob")
+ ... .returning(User)
... )
>>> result = session.scalars(stmt, execution_options={"synchronize_session": "fetch"})
>>> print(result.all())
from sqlalchemy.dialects.mysql import VARCHAR
- type_ = String(255).with_variant(VARCHAR(255, charset='utf8mb4'), "mysql", "mariadb")
+ type_ = String(255).with_variant(VARCHAR(255, charset="utf8mb4"), "mysql", "mariadb")
if typing.TYPE_CHECKING:
reveal_type(type_)
from sqlalchemy.dialects import oracle
- Table(
- "some_table", metadata,
- Column("value", oracle.FLOAT(126))
- )
+ Table("some_table", metadata, Column("value", oracle.FLOAT(126)))
A binary precision value of 126 is synonymous with using the
:class:`_sqltypes.DOUBLE_PRECISION` datatype, and a value of 63 is equivalent
from sqlalchemy.dialects import oracle
Table(
- "some_table", metadata,
- Column("value", Float(5).with_variant(oracle.FLOAT(16), "oracle"))
+ "some_table",
+ metadata,
+ Column("value", Float(5).with_variant(oracle.FLOAT(16), "oracle")),
)
-
.. _change_7086:
``match()`` operator on PostgreSQL uses ``plainto_tsquery()`` rather than ``to_tsquery()``
Recall from :doc:`/core/engines` that an :class:`_engine.Engine` is created via
the :func:`_sa.create_engine` call::
- engine = create_engine('mysql+mysqldb://scott:tiger@localhost/test')
+ engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test")
The typical usage of :func:`_sa.create_engine` is once per particular database
URL, held globally for the lifetime of a single application process. A single
with engine.connect() as connection:
result = connection.execute(text("select username from users"))
for row in result:
- print("username:", row['username'])
+ print("username:", row["username"])
Above, the :meth:`_engine.Engine.connect` method returns a :class:`_engine.Connection`
object, and by using it in a Python context manager (e.g. the ``with:``
with engine.connect() as connection:
connection.execute(some_table.insert(), {"x": 7, "y": "this is some data"})
- connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"})
+ connection.execute(
+ some_other_table.insert(), {"q": 8, "p": "this is some more data"}
+ )
connection.commit() # commit the transaction
with engine.connect() as connection:
with connection.begin():
connection.execute(some_table.insert(), {"x": 7, "y": "this is some data"})
- connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"})
+ connection.execute(
+ some_other_table.insert(), {"q": 8, "p": "this is some more data"}
+ )
# transaction is committed
with engine.begin() as connection:
connection.execute(some_table.insert(), {"x": 7, "y": "this is some data"})
- connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"})
+ connection.execute(
+ some_other_table.insert(), {"q": 8, "p": "this is some more data"}
+ )
# transaction is committed, and Connection is released to the connection
# pool
>>> with e.begin() as conn:
... conn.commit()
... conn.begin()
- ...
2021-11-08 09:49:07,517 INFO sqlalchemy.engine.Engine BEGIN (implicit)
2021-11-08 09:49:07,517 INFO sqlalchemy.engine.Engine COMMIT
Traceback (most recent call last):
# run a new statement outside of a block. The connection
# autobegins
- connection.execute(some_other_table.insert(), {"q": 8, "p": "this is some more data"})
+ connection.execute(
+ some_other_table.insert(), {"q": 8, "p": "this is some more data"}
+ )
# commit explicitly
connection.commit()
For example, to force REPEATABLE READ on a specific connection, then
begin a transaction::
- with engine.connect().execution_options(isolation_level="REPEATABLE READ") as connection:
- with connection.begin():
- connection.execute(<statement>)
+ with engine.connect().execution_options(
+ isolation_level="REPEATABLE READ"
+ ) as connection:
+ with connection.begin():
+ connection.execute("<statement>")
.. tip:: The return value of
the :meth:`_engine.Connection.execution_options` method is the same
from sqlalchemy import create_engine
eng = create_engine(
- "postgresql://scott:tiger@localhost/test",
- isolation_level="REPEATABLE READ"
+ "postgresql://scott:tiger@localhost/test", isolation_level="REPEATABLE READ"
)
With the above setting, each new DBAPI connection the moment it's created will
eng = create_engine(
"postgresql+psycopg2://scott:tiger@localhost/test",
- execution_options={
- "isolation_level": "REPEATABLE READ"
- }
+ execution_options={"isolation_level": "REPEATABLE READ"},
)
With the above setting, the DBAPI connection will be set to use a
If we wanted to check out a :class:`_engine.Connection` object and use it
"autocommit" mode, we would proceed as follows::
- with engine.connect() as connection:
- connection.execution_options(isolation_level="AUTOCOMMIT")
- connection.execute(<statement>)
- connection.execute(<statement>)
+ with engine.connect() as connection:
+ connection.execution_options(isolation_level="AUTOCOMMIT")
+ connection.execute("<statement>")
+ connection.execute("<statement>")
The above illustrates normal usage of "DBAPI autocommit" mode. There is no
need to make use of methods such as :meth:`_engine.Connection.begin`
In the example below, statements remain
**autocommitting** regardless of SQLAlchemy-level transaction blocks::
- with engine.connect() as connection:
- connection = connection.execution_options(isolation_level="AUTOCOMMIT")
+ with engine.connect() as connection:
+ connection = connection.execution_options(isolation_level="AUTOCOMMIT")
- # this begin() does not affect the DBAPI connection, isolation stays at AUTOCOMMIT
- with connection.begin() as trans:
- connection.execute(<statement>)
- connection.execute(<statement>)
+ # this begin() does not affect the DBAPI connection, isolation stays at AUTOCOMMIT
+ with connection.begin() as trans:
+ connection.execute("<statement>")
+ connection.execute("<statement>")
When we run a block like the above with logging turned on, the logging
will attempt to indicate that while a DBAPI level ``.commit()`` is called,
below will raise an error, as :meth:`_engine.Connection.begin` is being
called after autobegin has already occurred::
- with engine.connect() as connection:
- connection = connection.execution_options(isolation_level="AUTOCOMMIT")
+ with engine.connect() as connection:
+ connection = connection.execution_options(isolation_level="AUTOCOMMIT")
- # "transaction" is autobegin (but has no effect due to autocommit)
- connection.execute(<statement>)
+ # "transaction" is autobegin (but has no effect due to autocommit)
+ connection.execute("<statement>")
- # this will raise; "transaction" is already begun
- with connection.begin() as trans:
- connection.execute(<statement>)
+ # this will raise; "transaction" is already begun
+ with connection.begin() as trans:
+ connection.execute("<statement>")
The above example also demonstrates the same theme that the "autocommit"
isolation level is a configurational detail of the underlying database
connection.execution_options(isolation_level="AUTOCOMMIT")
# run statement(s) in autocommit mode
- connection.execute(<statement>)
+ connection.execute("<statement>")
# "commit" the autobegun "transaction"
connection.commit()
# use a begin block
with connection.begin() as trans:
- connection.execute(<statement>)
+ connection.execute("<statement>")
Above, to manually revert the isolation level we made use of
:attr:`_engine.Connection.default_isolation_level` to restore the default
with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as connection:
# run statement in autocommit mode
- connection.execute(<statement>)
+ connection.execute("<statement>")
# use a regular block
with engine.begin() as connection:
- connection.execute(<statement>)
+ connection.execute("<statement>")
To sum up:
These three behaviors are illustrated in the example below::
with engine.connect() as conn:
- result = (
- conn.
- execution_options(yield_per=100).
- execute(text("select * from table"))
- )
+ result = conn.execution_options(yield_per=100).execute(text("select * from table"))
for partition in result.partitions():
# partition is an iterable that will be at most 100 items
Given a table::
user_table = Table(
- 'user', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50))
+ "user",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
)
The "schema" of this :class:`_schema.Table` as defined by the
render the schema as ``user_schema_one``::
connection = engine.connect().execution_options(
- schema_translate_map={None: "user_schema_one"})
+ schema_translate_map={None: "user_schema_one"}
+ )
result = connection.execute(user_table.select())
connection = engine.connect().execution_options(
schema_translate_map={
- None: "user_schema_one", # no schema name -> "user_schema_one"
- "special": "special_schema", # schema="special" becomes "special_schema"
- "public": None # Table objects with schema="public" will render with no schema
- })
+ None: "user_schema_one", # no schema name -> "user_schema_one"
+ "special": "special_schema", # schema="special" becomes "special_schema"
+ "public": None, # Table objects with schema="public" will render with no schema
+ }
+ )
The :paramref:`.Connection.execution_options.schema_translate_map` parameter
affects all DDL and SQL constructs generated from the SQL expression language,
of this cache defaults to 500 and may be configured using the
:paramref:`_sa.create_engine.query_cache_size` parameter::
- engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", query_cache_size=1200)
+ engine = create_engine(
+ "postgresql+psycopg2://scott:tiger@localhost/test", query_cache_size=1200
+ )
The size of the cache can grow to be a factor of 150% of the size given, before
it's pruned back down to the target size. A cache of size 1200 above can therefore
As an example, we will examine the logging produced by the following program::
- from sqlalchemy import Column
- from sqlalchemy import create_engine
- from sqlalchemy import ForeignKey
- from sqlalchemy import Integer
- from sqlalchemy import String
- from sqlalchemy.ext.declarative import declarative_base
- from sqlalchemy.orm import relationship
- from sqlalchemy.orm import Session
+ from sqlalchemy import Column
+ from sqlalchemy import create_engine
+ from sqlalchemy import ForeignKey
+ from sqlalchemy import Integer
+ from sqlalchemy import String
+ from sqlalchemy.ext.declarative import declarative_base
+ from sqlalchemy.orm import relationship
+ from sqlalchemy.orm import Session
- Base = declarative_base()
+ Base = declarative_base()
- class A(Base):
- __tablename__ = "a"
+ class A(Base):
+ __tablename__ = "a"
- id = Column(Integer, primary_key=True)
- data = Column(String)
- bs = relationship("B")
+ id = Column(Integer, primary_key=True)
+ data = Column(String)
+ bs = relationship("B")
- class B(Base):
- __tablename__ = "b"
- id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey("a.id"))
- data = Column(String)
+ class B(Base):
+ __tablename__ = "b"
+ id = Column(Integer, primary_key=True)
+ a_id = Column(ForeignKey("a.id"))
+ data = Column(String)
- e = create_engine("sqlite://", echo=True)
- Base.metadata.create_all(e)
+ e = create_engine("sqlite://", echo=True)
+ Base.metadata.create_all(e)
- s = Session(e)
+ s = Session(e)
- s.add_all(
- [A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])]
- )
- s.commit()
+ s.add_all([A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])])
+ s.commit()
- for a_rec in s.query(A):
- print(a_rec.bs)
+ for a_rec in s.query(A):
+ print(a_rec.bs)
When run, each SQL statement that's logged will include a bracketed
cache statistics badge to the left of the parameters passed. The four
style invocations. For example, to run a series of SQL statements and have
them cached in a particular dictionary::
- my_cache = {}
- with engine.connect().execution_options(compiled_cache=my_cache) as conn:
- conn.execute(table.select())
+ my_cache = {}
+ with engine.connect().execution_options(compiled_cache=my_cache) as conn:
+ conn.execute(table.select())
The SQLAlchemy ORM uses the above technique to hold onto per-mapper caches
within the unit of work "flush" process that are separate from the default
The cache can also be disabled with this argument by sending a value of
``None``::
- # disable caching for this connection
- with engine.connect().execution_options(compiled_cache=None) as conn:
- conn.execute(table.select())
+ # disable caching for this connection
+ with engine.connect().execution_options(compiled_cache=None) as conn:
+ conn.execute(table.select())
.. _engine_thirdparty_caching:
from sqlalchemy.engine.default import DefaultDialect
+
class MyDialect(DefaultDialect):
supports_statement_cache = True
def limit_clause(self, select, **kw):
text = ""
if select._limit is not None:
- text += " \n LIMIT %d" % (select._limit, )
+ text += " \n LIMIT %d" % (select._limit,)
if select._offset is not None:
- text += " \n OFFSET %d" % (select._offset, )
+ text += " \n OFFSET %d" % (select._offset,)
return text
The above routine renders the :attr:`.Select._limit` and
from sqlalchemy import lambda_stmt
+
def run_my_statement(connection, parameter):
stmt = lambda_stmt(lambda: select(table))
stmt += lambda s: s.where(table.c.col == parameter)
return connection.execute(stmt)
+
with engine.connect() as conn:
result = run_my_statement(some_connection, "some parameter")
def upd(id_, newname):
stmt = lambda_stmt(lambda: users.update())
stmt += lambda s: s.values(name=newname)
- stmt += lambda s: s.where(users.c.id==id_)
+ stmt += lambda s: s.where(users.c.id == id_)
return stmt
+
with engine.begin() as conn:
conn.execute(upd(7, "foo"))
>>> def my_stmt(x, y):
... stmt = lambda_stmt(lambda: select(func.max(x, y)))
... return stmt
- ...
>>> engine = create_engine("sqlite://", echo=True)
>>> with engine.connect() as conn:
... print(conn.scalar(my_stmt(5, 10)))
... print(conn.scalar(my_stmt(12, 8)))
- ...
{opensql}SELECT max(?, ?) AS max_1
[generated in 0.00057s] (5, 10){stop}
10
>>> def my_stmt(x, y):
... def get_x():
... return x
+ ...
... def get_y():
... return y
- ...
+ ...
... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y())))
... return stmt
- ...
>>> with engine.connect() as conn:
... print(conn.scalar(my_stmt(5, 10)))
- ...
Traceback (most recent call last):
# ...
sqlalchemy.exc.InvalidRequestError: Can't invoke Python callable get_x()
>>> def my_stmt(x, y):
... def get_x():
... return x
+ ...
... def get_y():
... return y
- ...
+ ...
... x_param, y_param = get_x(), get_y()
... stmt = lambda_stmt(lambda: select(func.max(x_param, y_param)))
... return stmt
... def __init__(self, x, y):
... self.x = x
... self.y = y
- ...
>>> def my_stmt(foo):
... stmt = lambda_stmt(lambda: select(func.max(foo.x, foo.y)))
... return stmt
- ...
>>> with engine.connect() as conn:
- ... print(conn.scalar(my_stmt(Foo(5, 10))))
- ...
+ ... print(conn.scalar(my_stmt(Foo(5, 10))))
Traceback (most recent call last):
# ...
sqlalchemy.exc.InvalidRequestError: Closure variable named 'foo' inside of
>>> def my_stmt(foo):
... stmt = lambda_stmt(
- ... lambda: select(func.max(foo.x, foo.y)),
- ... track_closure_variables=False
+ ... lambda: select(func.max(foo.x, foo.y)), track_closure_variables=False
... )
... return stmt
>>> def my_stmt(self, foo):
... stmt = lambda_stmt(
- ... lambda: select(*self.column_expressions),
- ... track_closure_variables=False
- ... )
- ... stmt = stmt.add_criteria(
- ... lambda: self.where_criteria,
- ... track_on=[self]
+ ... lambda: select(*self.column_expressions), track_closure_variables=False
... )
+ ... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self])
... return stmt
Using ``track_on`` means the given objects will be stored long term in the
state within the construct::
>>> from sqlalchemy import select, column
- >>> stmt = select(column('q'))
+ >>> stmt = select(column("q"))
>>> cache_key = stmt._generate_cache_key()
>>> print(cache_key) # somewhat paraphrased
CacheKey(key=(
:func:`_sa.create_engine`::
engine = create_engine(
- "mariadb+mariadbconnector://scott:tiger@host/db",
- use_insertmanyvalues=False
+ "mariadb+mariadbconnector://scott:tiger@host/db", use_insertmanyvalues=False
)
The feature can also be disabled from being used implicitly for a particular
:paramref:`_schema.Table.implicit_returning` parameter as ``False``::
t = Table(
- 't',
+ "t",
metadata,
- Column('id', Integer, primary_key=True),
- Column('x', Integer),
- implicit_returning=False
+ Column("id", Integer, primary_key=True),
+ Column("x", Integer),
+ implicit_returning=False,
)
The reason one might want to disable RETURNING for a specific table is to
result = conn.execute(
table.insert().returning(table.c.id),
parameterlist,
- execution_options={"insertmanyvalues_page_size": 100}
+ execution_options={"insertmanyvalues_page_size": 100},
)
Or configured on the statement itself::
- stmt = table.insert().returning(table.c.id).execution_options(
- insertmanyvalues_page_size=100
+ stmt = (
+ table.insert()
+ .returning(table.c.id)
+ .execution_options(insertmanyvalues_page_size=100)
)
with e.begin() as conn:
result = conn.execute(stmt, parameterlist)
with engine.connect() as conn:
conn.exec_driver_sql("SET param='bar'")
-
.. versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method.
.. _dbapi_connections_cursor:
connection = engine.raw_connection()
try:
cursor_obj = connection.cursor()
- cursor_obj.callproc("my_procedure", ['x', 'y', 'z'])
+ cursor_obj.callproc("my_procedure", ["x", "y", "z"])
results = list(cursor_obj.fetchall())
cursor_obj.close()
connection.commit()
finally:
connection.close()
-
-
Registering New Dialects
========================
+.. highlight:: ini
+.. format:off
+
The :func:`_sa.create_engine` function call locates the given dialect
using setuptools entrypoints. These entry points can be established
for third party dialects within the setup.py script. For example,
The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``.
+.. format:on
+.. highlight:: python
+
Registering Dialects In-Process
-------------------------------
the need for separate installation. Use the ``register()`` function as follows::
from sqlalchemy.dialects import registry
+
registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect")
The above will respond to ``create_engine("mysql+foodialect://")`` and load the
is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object
as an argument to a :class:`~sqlalchemy.schema.Column` object::
- user_preference = Table('user_preference', metadata_obj,
- Column('pref_id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
- Column('pref_name', String(40), nullable=False),
- Column('pref_value', String(100))
+ user_preference = Table(
+ "user_preference",
+ metadata_obj,
+ Column("pref_id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False),
+ Column("pref_name", String(40), nullable=False),
+ Column("pref_value", String(100)),
)
Above, we define a new table ``user_preference`` for which each row must
has a composite primary key. Below we define a table ``invoice`` which has a
composite primary key::
- invoice = Table('invoice', metadata_obj,
- Column('invoice_id', Integer, primary_key=True),
- Column('ref_num', Integer, primary_key=True),
- Column('description', String(60), nullable=False)
+ invoice = Table(
+ "invoice",
+ metadata_obj,
+ Column("invoice_id", Integer, primary_key=True),
+ Column("ref_num", Integer, primary_key=True),
+ Column("description", String(60), nullable=False),
)
And then a table ``invoice_item`` with a composite foreign key referencing
``invoice``::
- invoice_item = Table('invoice_item', metadata_obj,
- Column('item_id', Integer, primary_key=True),
- Column('item_name', String(60), nullable=False),
- Column('invoice_id', Integer, nullable=False),
- Column('ref_num', Integer, nullable=False),
- ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num'])
+ invoice_item = Table(
+ "invoice_item",
+ metadata_obj,
+ Column("item_id", Integer, primary_key=True),
+ Column("item_name", String(60), nullable=False),
+ Column("invoice_id", Integer, nullable=False),
+ Column("ref_num", Integer, nullable=False),
+ ForeignKeyConstraint(
+ ["invoice_id", "ref_num"], ["invoice.invoice_id", "invoice.ref_num"]
+ ),
)
It's important to note that the
most forms of ALTER. Given a schema like::
node = Table(
- 'node', metadata_obj,
- Column('node_id', Integer, primary_key=True),
- Column(
- 'primary_element', Integer,
- ForeignKey('element.element_id')
- )
+ "node",
+ metadata_obj,
+ Column("node_id", Integer, primary_key=True),
+ Column("primary_element", Integer, ForeignKey("element.element_id")),
)
element = Table(
- 'element', metadata_obj,
- Column('element_id', Integer, primary_key=True),
- Column('parent_node_id', Integer),
+ "element",
+ metadata_obj,
+ Column("element_id", Integer, primary_key=True),
+ Column("parent_node_id", Integer),
ForeignKeyConstraint(
- ['parent_node_id'], ['node.node_id'],
- name='fk_element_parent_node_id'
- )
+ ["parent_node_id"], ["node.node_id"], name="fk_element_parent_node_id"
+ ),
)
When we call upon :meth:`_schema.MetaData.create_all` on a backend such as the
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... metadata_obj.create_all(conn, checkfirst=False)
+ ... metadata_obj.create_all(conn, checkfirst=False)
{opensql}CREATE TABLE element (
element_id SERIAL NOT NULL,
parent_node_id INTEGER,
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... metadata_obj.drop_all(conn, checkfirst=False)
+ ... metadata_obj.drop_all(conn, checkfirst=False)
{opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id
DROP TABLE node
DROP TABLE element
the ``'element'`` table as follows::
element = Table(
- 'element', metadata_obj,
- Column('element_id', Integer, primary_key=True),
- Column('parent_node_id', Integer),
+ "element",
+ metadata_obj,
+ Column("element_id", Integer, primary_key=True),
+ Column("parent_node_id", Integer),
ForeignKeyConstraint(
- ['parent_node_id'], ['node.node_id'],
- use_alter=True, name='fk_element_parent_node_id'
- )
+ ["parent_node_id"],
+ ["node.node_id"],
+ use_alter=True,
+ name="fk_element_parent_node_id",
+ ),
)
in our CREATE DDL we will see the ALTER statement only for this constraint,
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... metadata_obj.create_all(conn, checkfirst=False)
+ ... metadata_obj.create_all(conn, checkfirst=False)
{opensql}CREATE TABLE element (
element_id SERIAL NOT NULL,
parent_node_id INTEGER,
arguments. The value is any string which will be output after the appropriate
"ON UPDATE" or "ON DELETE" phrase::
- child = Table('child', metadata_obj,
- Column('id', Integer,
- ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
- primary_key=True
- )
- )
-
- composite = Table('composite', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('rev_id', Integer),
- Column('note_id', Integer),
+ child = Table(
+ "child",
+ metadata_obj,
+ Column(
+ "id",
+ Integer,
+ ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"),
+ primary_key=True,
+ ),
+ )
+
+ composite = Table(
+ "composite",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("rev_id", Integer),
+ Column("note_id", Integer),
ForeignKeyConstraint(
- ['rev_id', 'note_id'],
- ['revisions.id', 'revisions.note_id'],
- onupdate="CASCADE", ondelete="SET NULL"
- )
+ ["rev_id", "note_id"],
+ ["revisions.id", "revisions.note_id"],
+ onupdate="CASCADE",
+ ondelete="SET NULL",
+ ),
)
Note that these clauses require ``InnoDB`` tables when used with MySQL.
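+
+ A minimal sketch of requesting InnoDB explicitly (``mysql_engine`` is a
+ MySQL-dialect table option; other backends ignore it)::
+
+     parent = Table(
+         "parent",
+         metadata_obj,
+         Column("id", Integer, primary_key=True),
+         mysql_engine="InnoDB",
+     )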
from sqlalchemy import UniqueConstraint
metadata_obj = MetaData()
- mytable = Table('mytable', metadata_obj,
-
+ mytable = Table(
+ "mytable",
+ metadata_obj,
# per-column anonymous unique constraint
- Column('col1', Integer, unique=True),
-
- Column('col2', Integer),
- Column('col3', Integer),
-
+ Column("col1", Integer, unique=True),
+ Column("col2", Integer),
+ Column("col3", Integer),
# explicit/composite unique constraint. 'name' is optional.
- UniqueConstraint('col2', 'col3', name='uix_1')
- )
+ UniqueConstraint("col2", "col3", name="uix_1"),
+ )
CHECK Constraint
----------------
from sqlalchemy import PrimaryKeyConstraint
- my_table = Table('mytable', metadata_obj,
- Column('id', Integer),
- Column('version_id', Integer),
- Column('data', String(50)),
- PrimaryKeyConstraint('id', 'version_id', name='mytable_pk')
- )
+ my_table = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer),
+ Column("version_id", Integer),
+ Column("data", String(50)),
+ PrimaryKeyConstraint("id", "version_id", name="mytable_pk"),
+ )
.. seealso::
An example naming convention that suits basic cases is as follows::
convention = {
- "ix": 'ix_%(column_0_label)s',
- "uq": "uq_%(table_name)s_%(column_0_name)s",
- "ck": "ck_%(table_name)s_%(constraint_name)s",
- "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
- "pk": "pk_%(table_name)s"
+ "ix": "ix_%(column_0_label)s",
+ "uq": "uq_%(table_name)s_%(column_0_name)s",
+ "ck": "ck_%(table_name)s_%(constraint_name)s",
+ "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
+ "pk": "pk_%(table_name)s",
}
metadata_obj = MetaData(naming_convention=convention)
For example, we can observe the name produced when we create an unnamed
:class:`.UniqueConstraint`::
- >>> user_table = Table('user', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30), nullable=False),
- ... UniqueConstraint('name')
+ >>> user_table = Table(
+ ... "user",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30), nullable=False),
+ ... UniqueConstraint("name"),
... )
>>> list(user_table.constraints)[1].name
'uq_user_name'
This same feature takes effect even if we just use the :paramref:`_schema.Column.unique`
flag::
- >>> user_table = Table('user', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30), nullable=False, unique=True)
- ... )
+ >>> user_table = Table(
+ ... "user",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30), nullable=False, unique=True),
+ ... )
>>> list(user_table.constraints)[1].name
'uq_user_name'
hash of the long name. For example, the naming convention below will
generate very long names given the column names in use::
- metadata_obj = MetaData(naming_convention={
- "uq": "uq_%(table_name)s_%(column_0_N_name)s"
- })
+ metadata_obj = MetaData(
+ naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"}
+ )
long_names = Table(
- 'long_names', metadata_obj,
- Column('information_channel_code', Integer, key='a'),
- Column('billing_convention_name', Integer, key='b'),
- Column('product_identifier', Integer, key='c'),
- UniqueConstraint('a', 'b', 'c')
+ "long_names",
+ metadata_obj,
+ Column("information_channel_code", Integer, key="a"),
+ Column("billing_convention_name", Integer, key="b"),
+ Column("product_identifier", Integer, key="c"),
+ UniqueConstraint("a", "b", "c"),
)
On the PostgreSQL dialect, names longer than 63 characters will be truncated
import uuid
+
def fk_guid(constraint, table):
- str_tokens = [
- table.name,
- ] + [
- element.parent.name for element in constraint.elements
- ] + [
- element.target_fullname for element in constraint.elements
- ]
- guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii'))
+ str_tokens = (
+ [
+ table.name,
+ ]
+ + [element.parent.name for element in constraint.elements]
+ + [element.target_fullname for element in constraint.elements]
+ )
+ guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode("ascii"))
return str(guid)
+
convention = {
"fk_guid": fk_guid,
- "ix": 'ix_%(column_0_label)s',
+ "ix": "ix_%(column_0_label)s",
"fk": "fk_%(fk_guid)s",
}
>>> metadata_obj = MetaData(naming_convention=convention)
- >>> user_table = Table('user', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('version', Integer, primary_key=True),
- ... Column('data', String(30))
- ... )
- >>> address_table = Table('address', metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', Integer),
- ... Column('user_version_id', Integer)
- ... )
- >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'],
- ... ['user.id', 'user.version'])
+ >>> user_table = Table(
+ ... "user",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("version", Integer, primary_key=True),
+ ... Column("data", String(30)),
+ ... )
+ >>> address_table = Table(
+ ... "address",
+ ... metadata_obj,
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", Integer),
+ ... Column("user_version_id", Integer),
+ ... )
+ >>> fk = ForeignKeyConstraint(["user_id", "user_version_id"], ["user.id", "user.version"])
>>> address_table.append_constraint(fk)
>>> fk.name
'fk_0cd51ab5-8d70-56e8-a83c-86661737766d'
naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
)
- Table('foo', metadata_obj,
- Column('value', Integer),
- CheckConstraint('value > 5', name='value_gt_5')
+ Table(
+ "foo",
+ metadata_obj,
+ Column("value", Integer),
+ CheckConstraint("value > 5", name="value_gt_5"),
)
The above table will produce the name ``ck_foo_value_gt_5``::
:func:`_expression.column` element within the constraint's expression,
either by declaring the constraint separate from the table::
- metadata_obj = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- foo = Table('foo', metadata_obj,
- Column('value', Integer)
- )
+ foo = Table("foo", metadata_obj, Column("value", Integer))
CheckConstraint(foo.c.value > 5)
from sqlalchemy import column
- metadata_obj = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- foo = Table('foo', metadata_obj,
- Column('value', Integer),
- CheckConstraint(column('value') > 5)
+ foo = Table(
+ "foo", metadata_obj, Column("value", Integer), CheckConstraint(column("value") > 5)
)
Both will produce the name ``ck_foo_value``::
The name for the constraint here is most directly set up by sending
the "name" parameter, e.g. :paramref:`.Boolean.name`::
- Table('foo', metadata_obj,
- Column('flag', Boolean(name='ck_foo_flag'))
- )
+ Table("foo", metadata_obj, Column("flag", Boolean(name="ck_foo_flag")))
The naming convention feature may be combined with these types as well,
normally by using a convention which includes ``%(constraint_name)s``
naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
)
- Table('foo', metadata_obj,
- Column('flag', Boolean(name='flag_bool'))
- )
+ Table("foo", metadata_obj, Column("flag", Boolean(name="flag_bool")))
The above table will produce the constraint name ``ck_foo_flag_bool``::
which works nicely with :class:`.SchemaType` since these constraints have
only one column::
- metadata_obj = MetaData(
- naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
- )
+ metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"})
- Table('foo', metadata_obj,
- Column('flag', Boolean())
- )
+ Table("foo", metadata_obj, Column("flag", Boolean()))
The above schema will produce::
identify columns::
metadata_obj = MetaData()
- mytable = Table('mytable', metadata_obj,
- Column('col1', Integer),
-
- Column('col2', Integer),
-
- Column('col3', Integer),
- Column('col4', Integer),
-
+ mytable = Table(
+ "mytable",
+ metadata_obj,
+ Column("col1", Integer),
+ Column("col2", Integer),
+ Column("col3", Integer),
+ Column("col4", Integer),
# place an index on col1, col2
- Index('idx_col12', 'col1', 'col2'),
-
+ Index("idx_col12", "col1", "col2"),
# place a unique index on col3, col4
- Index('idx_col34', 'col3', 'col4', unique=True)
+ Index("idx_col34", "col3", "col4", unique=True),
)
The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method:
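+
+ A minimal sketch (assuming an :class:`_engine.Engine` named ``engine`` and
+ the ``mytable`` defined previously)::
+
+     i = Index("idx_col1", mytable.c.col1)
+     i.create(engine)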
from sqlalchemy import Index
- Index('someindex', mytable.c.somecol.desc())
+ Index("someindex", mytable.c.somecol.desc())
Or with a backend that supports functional indexes such as PostgreSQL,
a "case insensitive" index can be created using the ``lower()`` function::
from sqlalchemy import func, Index
- Index('someindex', func.lower(mytable.c.somecol))
+ Index("someindex", func.lower(mytable.c.somecol))
Index API
---------
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import BINARY
+
@compiles(BINARY, "sqlite")
def compile_binary_sqlite(type_, compiler, **kw):
return "BLOB"
from sqlalchemy.types import TypeDecorator, Unicode
+
class CoerceUTF8(TypeDecorator):
"""Safely coerce Python bytestrings to Unicode
before passing off to the database."""
def process_bind_param(self, value, dialect):
if isinstance(value, bytes):
- value = value.decode('utf-8')
+ value = value.decode("utf-8")
return value
Rounding Numerics
from sqlalchemy.types import TypeDecorator, Numeric
from decimal import Decimal
+
class SafeNumeric(TypeDecorator):
"""Adds quantization to Numeric."""
def __init__(self, *arg, **kw):
TypeDecorator.__init__(self, *arg, **kw)
- self.quantize_int = - self.impl.scale
+ self.quantize_int = -self.impl.scale
self.quantize = Decimal(10) ** self.quantize_int
def process_bind_param(self, value, dialect):
- if isinstance(value, Decimal) and \
- value.as_tuple()[2] < self.quantize_int:
+ if isinstance(value, Decimal) and value.as_tuple()[2] < self.quantize_int:
value = value.quantize(self.quantize)
return value
import datetime
+
class TZDateTime(TypeDecorator):
impl = DateTime
cache_ok = True
if value is not None:
if not value.tzinfo:
raise TypeError("tzinfo is required")
- value = value.astimezone(datetime.timezone.utc).replace(
- tzinfo=None
- )
+ value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None)
return value
def process_result_value(self, value, dialect):
value = value.replace(tzinfo=datetime.timezone.utc)
return value
-
.. _custom_guid_type:
Backend-agnostic GUID Type
from sqlalchemy.dialects.postgresql import UUID
import uuid
+
class GUID(TypeDecorator):
"""Platform-independent GUID type.
CHAR(32), storing as stringified hex values.
"""
+
impl = CHAR
cache_ok = True
def load_dialect_impl(self, dialect):
- if dialect.name == 'postgresql':
+ if dialect.name == "postgresql":
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
- elif dialect.name == 'postgresql':
+ elif dialect.name == "postgresql":
return str(value)
else:
if not isinstance(value, uuid.UUID):
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
+
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string.
json_type = MutableDict.as_mutable(JSONEncodedDict)
+
class MyClass(Base):
# ...
json_data = Column(json_type)
-
.. seealso::
:ref:`mutable_toplevel`
from sqlalchemy import type_coerce, String
- stmt = select(my_table).where(
- type_coerce(my_table.c.json_data, String).like('%foo%'))
+ stmt = select(my_table).where(type_coerce(my_table.c.json_data, String).like("%foo%"))
:class:`.TypeDecorator` provides a built-in system for working up type
translations like these based on operators. If we wanted to frequently use the
from sqlalchemy.sql import operators
from sqlalchemy import String
+
class JSONEncodedDict(TypeDecorator):
impl = VARCHAR
from sqlalchemy import func
from sqlalchemy.types import UserDefinedType
+
class Geometry(UserDefinedType):
def get_col_spec(self):
return "GEOMETRY"
We can apply the ``Geometry`` type into :class:`_schema.Table` metadata
and use it in a :func:`_expression.select` construct::
- geometry = Table('geometry', metadata,
- Column('geom_id', Integer, primary_key=True),
- Column('geom_data', Geometry)
- )
+ geometry = Table(
+ "geometry",
+ metadata,
+ Column("geom_id", Integer, primary_key=True),
+ Column("geom_data", Geometry),
+ )
- print(select(geometry).where(
- geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)'))
+ print(
+ select(geometry).where(
+ geometry.c.geom_data == "LINESTRING(189412 252431,189631 259122)"
+ )
+ )
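+
+ For reference, a hedged sketch of defining such function wrapping on the
+ type, via the ``bind_expression()`` and ``column_expression()`` hooks (the
+ PostGIS function names here are illustrative)::
+
+     from sqlalchemy import func
+     from sqlalchemy.types import UserDefinedType
+
+     class Geometry(UserDefinedType):
+         def get_col_spec(self):
+             return "GEOMETRY"
+
+         def bind_expression(self, bindvalue):
+             # bound parameters pass through ST_GeomFromText() on the way in
+             return func.ST_GeomFromText(bindvalue, type_=self)
+
+         def column_expression(self, col):
+             # selected columns are run through ST_AsText() on the way out
+             return func.ST_AsText(col, type_=self)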
The resulting SQL embeds both functions as appropriate. ``ST_AsText``
is applied to the columns clause so that the return value is run through
a :func:`_expression.select` against a :func:`.label` of our expression, the string
label is moved to the outside of the wrapped expression::
- print(select(geometry.c.geom_data.label('my_data')))
+ print(select(geometry.c.geom_data.label("my_data")))
Output::
PostgreSQL ``pgcrypto`` extension to encrypt/decrypt values
transparently::
- from sqlalchemy import create_engine, String, select, func, \
- MetaData, Table, Column, type_coerce, TypeDecorator
+ from sqlalchemy import (
+ create_engine,
+ String,
+ select,
+ func,
+ MetaData,
+ Table,
+ Column,
+ type_coerce,
+ TypeDecorator,
+ )
from sqlalchemy.dialects.postgresql import BYTEA
+
class PGPString(TypeDecorator):
impl = BYTEA
def column_expression(self, col):
return func.pgp_sym_decrypt(col, self.passphrase)
+
metadata_obj = MetaData()
- message = Table('message', metadata_obj,
- Column('username', String(50)),
- Column('message',
- PGPString("this is my passphrase")),
- )
+ message = Table(
+ "message",
+ metadata_obj,
+ Column("username", String(50)),
+ Column("message", PGPString("this is my passphrase")),
+ )
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", echo=True)
with engine.begin() as conn:
metadata_obj.create_all(conn)
- conn.execute(message.insert(), username="some user",
- message="this is my message")
+ conn.execute(message.insert(), username="some user", message="this is my message")
- print(conn.scalar(
- select(message.c.message).\
- where(message.c.username == "some user")
- ))
+ print(
+ conn.scalar(select(message.c.message).where(message.c.username == "some user"))
+ )
The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied
to the INSERT and SELECT statements::
is a Python callable that accepts any arbitrary right-hand side expression::
>>> from sqlalchemy import column
- >>> expr = column('x').op('>>')(column('y'))
+ >>> expr = column("x").op(">>")(column("y"))
>>> print(expr)
x >> y
from sqlalchemy import Integer
+
class MyInt(Integer):
class comparator_factory(Integer.Comparator):
def __add__(self, other):
from sqlalchemy import Integer
+
class MyInt(Integer):
class comparator_factory(Integer.Comparator):
def __add__(self, other):
from sqlalchemy import Integer, func
+
class MyInt(Integer):
class comparator_factory(Integer.Comparator):
def log(self, other):
from sqlalchemy.sql.expression import UnaryExpression
from sqlalchemy.sql import operators
+
class MyInteger(Integer):
class comparator_factory(Integer.Comparator):
def factorial(self):
- return UnaryExpression(self.expr,
- modifier=operators.custom_op("!"),
- type_=MyInteger)
+ return UnaryExpression(
+ self.expr, modifier=operators.custom_op("!"), type_=MyInteger
+ )
Using the above type::
>>> from sqlalchemy.sql import column
- >>> print(column('x', MyInteger).factorial())
+ >>> print(column("x", MyInteger).factorial())
x !
.. seealso::
for this database table elsewhere using reflection, it will not have this
datatype. For example::
- >>> from sqlalchemy import Table, Column, MetaData, create_engine, PickleType, Integer
+ >>> from sqlalchemy import (
+ ... Table,
+ ... Column,
+ ... MetaData,
+ ... create_engine,
+ ... PickleType,
+ ... Integer,
+ ... )
>>> metadata = MetaData()
- >>> my_table = Table("my_table", metadata, Column('id', Integer), Column("data", PickleType))
- >>> engine = create_engine("sqlite://", echo='debug')
+ >>> my_table = Table(
+ ... "my_table", metadata, Column("id", Integer), Column("data", PickleType)
+ ... )
+ >>> engine = create_engine("sqlite://", echo="debug")
>>> my_table.create(engine)
INFO sqlalchemy.engine.base.Engine
CREATE TABLE my_table (
columns for which we want to use a custom or decorated datatype::
>>> metadata_three = MetaData()
- >>> my_reflected_table = Table("my_table", metadata_three, Column("data", PickleType), autoload_with=engine)
+ >>> my_reflected_table = Table(
+ ... "my_table",
+ ... metadata_three,
+ ... Column("data", PickleType),
+ ... autoload_with=engine,
+ ... )
The ``my_reflected_table`` object above is reflected, and will load the
definition of the "id" column from the SQLite database. But for the "data"
from sqlalchemy import PickleType
from sqlalchemy import Table
+
@event.listens_for(Table, "column_reflect")
def _setup_pickletype(inspector, table, column_info):
if isinstance(column_info["type"], BLOB):
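+             # hedged completion: swap in the richer in-Python datatype
+             column_info["type"] = PickleType()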
in order to affect only those columns where the datatype is important, such as
a lookup table of table names and possibly column names, or other heuristics
in order to accurately determine which columns should be established with an
-in Python datatype.
\ No newline at end of file
+in Python datatype.
event.listen(
metadata,
"after_create",
- DDL("ALTER TABLE users ADD CONSTRAINT "
+ DDL(
+ "ALTER TABLE users ADD CONSTRAINT "
"cst_user_name_length "
- " CHECK (length(user_name) >= 8)")
+ " CHECK (length(user_name) >= 8)"
+ ),
)
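As a rough sketch, assuming the ``metadata`` collection above contains the
``users`` table and an ``engine`` is available, the DDL emits right after
the CREATE TABLE statements::

    # emits CREATE TABLE for "users", followed by the ALTER TABLE ...
    # ADD CONSTRAINT registered on the "after_create" event
    metadata.create_all(engine)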
A more comprehensive method of creating libraries of DDL constructs is to use
the PostgreSQL backend, we could invoke this as::
mytable = Table(
- 'mytable', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', String(50))
+ "mytable",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("data", String(50)),
)
func = DDL(
"FOR EACH ROW EXECUTE PROCEDURE my_func();"
)
- event.listen(
- mytable,
- 'after_create',
- func.execute_if(dialect='postgresql')
- )
+ event.listen(mytable, "after_create", func.execute_if(dialect="postgresql"))
- event.listen(
- mytable,
- 'after_create',
- trigger.execute_if(dialect='postgresql')
- )
+ event.listen(mytable, "after_create", trigger.execute_if(dialect="postgresql"))
The :paramref:`.ExecutableDDLElement.execute_if.dialect` keyword also accepts a tuple
of string dialect names::
event.listen(
- mytable,
- "after_create",
- trigger.execute_if(dialect=('postgresql', 'mysql'))
+ mytable, "after_create", trigger.execute_if(dialect=("postgresql", "mysql"))
)
event.listen(
- mytable,
- "before_drop",
- trigger.execute_if(dialect=('postgresql', 'mysql'))
+ mytable, "before_drop", trigger.execute_if(dialect=("postgresql", "mysql"))
)
The :meth:`.ExecutableDDLElement.execute_if` method can also work against a callable
.. sourcecode:: python+sql
>>> from sqlalchemy import create_engine
- >>> postgresql_engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", echo=True)
+ >>> postgresql_engine = create_engine(
+ ... "postgresql+psycopg2://scott:tiger@localhost/test", echo=True
+ ... )
>>> meta.create_all(postgresql_engine)
{opensql}BEGIN (implicit)
select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s
.. sourcecode:: python+sql
def only_pg_14(ddl_element, target, bind, dialect, **kw):
- return (
- dialect.name == "postgresql" and
- dialect.server_version_info >= (14,)
- )
+ return dialect.name == "postgresql" and dialect.server_version_info >= (14,)
+
my_table = Table(
"my_table",
The simplest kind of default is a scalar value used as the default value of a column::
- Table("mytable", metadata_obj,
- Column("somecolumn", Integer, default=12)
- )
+ Table("mytable", metadata_obj, Column("somecolumn", Integer, default=12))
Above, the value "12" will be bound as the column value during an INSERT if no
other value is supplied.
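As a minimal sketch, assuming the table above were assigned to a variable
``mytable`` and an ``engine`` is present, an INSERT that omits the column
makes use of the default::

    with engine.begin() as conn:
        # no value is given for "somecolumn"; SQLAlchemy includes the
        # scalar default of 12 in the INSERT parameters
        conn.execute(mytable.insert())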
not very common (as UPDATE statements are usually looking for dynamic
defaults)::
- Table("mytable", metadata_obj,
- Column("somecolumn", Integer, onupdate=25)
- )
-
+ Table("mytable", metadata_obj, Column("somecolumn", Integer, onupdate=25))
Python-Executed Functions
-------------------------
# a function which counts upwards
i = 0
+
+
def mydefault():
global i
i += 1
return i
- t = Table("mytable", metadata_obj,
- Column('id', Integer, primary_key=True, default=mydefault),
+
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer, primary_key=True, default=mydefault),
)
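A sketch, assuming an ``engine`` is available: the callable is invoked anew
for each INSERT, so successive rows receive successive values::

    with engine.begin() as conn:
        for _ in range(3):
            # each execution calls mydefault(), producing "id" values 1, 2, 3
            conn.execute(t.insert())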
It should be noted that for real "incrementing sequence" behavior, the
import datetime
- t = Table("mytable", metadata_obj,
- Column('id', Integer, primary_key=True),
-
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
# define 'last_updated' to be populated with datetime.now()
- Column('last_updated', DateTime, onupdate=datetime.datetime.now),
+ Column("last_updated", DateTime, onupdate=datetime.datetime.now),
)
When an update statement executes and no value is passed for ``last_updated``,
single ``context`` argument::
def mydefault(context):
- return context.get_current_parameters()['counter'] + 12
+ return context.get_current_parameters()["counter"] + 12
- t = Table('mytable', metadata_obj,
- Column('counter', Integer),
- Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault)
+
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("counter", Integer),
+ Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault),
)
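A sketch, assuming a :class:`_engine.Connection` named ``conn``: an INSERT
that supplies ``counter`` has ``counter_plus_twelve`` computed from the
in-flight parameters::

    # mydefault() reads "counter" from get_current_parameters(), so
    # counter_plus_twelve receives 5 + 12 = 17
    conn.execute(t.insert(), {"counter": 5})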
The above default generation function is applied so that it will execute for
also be passed SQL expressions, which are in most cases rendered inline within the
INSERT or UPDATE statement::
- t = Table("mytable", metadata_obj,
- Column('id', Integer, primary_key=True),
-
+ t = Table(
+ "mytable",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
# define 'create_date' to default to now()
- Column('create_date', DateTime, default=func.now()),
-
+ Column("create_date", DateTime, default=func.now()),
# define 'key' to pull its default from the 'keyvalues' table
- Column('key', String(20), default=select(keyvalues.c.key).where(keyvalues.c.type='type1')),
-
+ Column(
+ "key",
+ String(20),
+ default=select(keyvalues.c.key).where(keyvalues.c.type == "type1"),
+ ),
# define 'last_modified' to use the current_timestamp SQL function on update
- Column('last_modified', DateTime, onupdate=func.utc_timestamp())
- )
+ Column("last_modified", DateTime, onupdate=func.utc_timestamp()),
+ )
Above, the ``create_date`` column will be populated with the result of the
``now()`` SQL function (which, depending on backend, compiles into ``NOW()``
.. sourcecode:: python+sql
- t = Table('test', metadata_obj,
- Column('abc', String(20), server_default='abc'),
- Column('created_at', DateTime, server_default=func.sysdate()),
- Column('index_value', Integer, server_default=text("0"))
+ t = Table(
+ "test",
+ metadata_obj,
+ Column("abc", String(20), server_default="abc"),
+ Column("created_at", DateTime, server_default=func.sysdate()),
+ Column("index_value", Integer, server_default=text("0")),
)
A create call for the above table will produce::
from sqlalchemy.schema import FetchedValue
- t = Table('test', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('abc', TIMESTAMP, server_default=FetchedValue()),
- Column('def', String(20), server_onupdate=FetchedValue())
+ t = Table(
+ "test",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("abc", TIMESTAMP, server_default=FetchedValue()),
+ Column("def", String(20), server_onupdate=FetchedValue()),
)
The :class:`.FetchedValue` indicator does not affect the rendered DDL for the
configured to fire off during UPDATE operations if desired. It is most
commonly used in conjunction with a single integer primary key column::
- table = Table("cartitems", metadata_obj,
+ table = Table(
+ "cartitems",
+ metadata_obj,
Column(
"cart_id",
Integer,
- Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True),
+ Sequence("cart_id_seq", metadata=metadata_obj),
+ primary_key=True,
+ ),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
Where above, the table "cartitems" is associated with a sequence named
passing it directly to a SQL execution method::
with my_engine.connect() as conn:
- seq = Sequence('some_sequence')
+ seq = Sequence("some_sequence")
nextid = conn.execute(seq)
In order to embed the "next value" function of a :class:`.Sequence`
method, which will render at statement compilation time a SQL function that is
appropriate for the target backend::
- >>> my_seq = Sequence('some_sequence')
+ >>> my_seq = Sequence("some_sequence")
>>> stmt = select(my_seq.next_value())
>>> print(stmt.compile(dialect=postgresql.dialect()))
SELECT nextval('some_sequence') AS next_value_1
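The same statement compiles appropriately on other backends as well; a
sketch against the Oracle dialect::

    >>> from sqlalchemy.dialects import oracle
    >>> print(stmt.compile(dialect=oracle.dialect()))
    SELECT some_sequence.nextval AS next_value_1 FROM DUAL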
For many years, the SQLAlchemy documentation referred to the
example of associating a :class:`.Sequence` with a table as follows::
- table = Table("cartitems", metadata_obj,
- Column("cart_id", Integer, Sequence('cart_id_seq'),
- primary_key=True),
+ table = Table(
+ "cartitems",
+ metadata_obj,
+ Column("cart_id", Integer, Sequence("cart_id_seq"), primary_key=True),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
While the above is a prominent idiomatic pattern, it is recommended that
the :class:`.Sequence` in most cases be explicitly associated with the
:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter::
- table = Table("cartitems", metadata_obj,
+ table = Table(
+ "cartitems",
+ metadata_obj,
Column(
"cart_id",
Integer,
- Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True),
+ Sequence("cart_id_seq", metadata=metadata_obj),
+ primary_key=True,
+ ),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
The :class:`.Sequence` object is a first class
:class:`_schema.Column` as the **Python side default generator**::
Column(
- "cart_id", Integer, Sequence('cart_id_seq', metadata=metadata_obj),
- primary_key=True)
+ "cart_id", Integer, Sequence("cart_id_seq", metadata=metadata_obj), primary_key=True
+ )
In the above case, the :class:`.Sequence` will automatically be subject
to CREATE SEQUENCE / DROP SEQUENCE DDL when the related :class:`_schema.Table`
:class:`_schema.Column` both as the Python-side default generator as well as
the server-side default generator::
- cart_id_seq = Sequence('cart_id_seq', metadata=metadata_obj)
- table = Table("cartitems", metadata_obj,
+ cart_id_seq = Sequence("cart_id_seq", metadata=metadata_obj)
+ table = Table(
+ "cartitems",
+ metadata_obj,
Column(
- "cart_id", Integer, cart_id_seq,
- server_default=cart_id_seq.next_value(), primary_key=True),
+ "cart_id",
+ Integer,
+ cart_id_seq,
+ server_default=cart_id_seq.next_value(),
+ primary_key=True,
+ ),
Column("description", String(40)),
- Column("createdate", DateTime())
+ Column("createdate", DateTime()),
)
or with the ORM::
class CartItem(Base):
- __tablename__ = 'cartitems'
+ __tablename__ = "cartitems"
- cart_id_seq = Sequence('cart_id_seq', metadata=Base.metadata)
+ cart_id_seq = Sequence("cart_id_seq", metadata=Base.metadata)
cart_id = Column(
- Integer, cart_id_seq,
- server_default=cart_id_seq.next_value(), primary_key=True)
+ Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True
+ )
description = Column(String(40))
createdate = Column(DateTime)
data = Table(
"data",
metadata_obj,
- Column('id', Integer, Identity(start=42, cycle=True), primary_key=True),
- Column('data', String)
+ Column("id", Integer, Identity(start=42, cycle=True), primary_key=True),
+ Column("data", String),
)
The DDL for the ``data`` table when run on a PostgreSQL 12 backend will look
:func:`_sa.create_engine()`::
from sqlalchemy import create_engine
- engine = create_engine('postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase')
+
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase")
The above engine creates a :class:`.Dialect` object tailored towards
PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI
PostgreSQL DBAPIs include pg8000 and asyncpg::
# default
- engine = create_engine('postgresql://scott:tiger@localhost/mydatabase')
+ engine = create_engine("postgresql://scott:tiger@localhost/mydatabase")
# psycopg2
- engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase')
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/mydatabase")
# pg8000
- engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase')
+ engine = create_engine("postgresql+pg8000://scott:tiger@localhost/mydatabase")
More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`.
MySQL DBAPIs available, including PyMySQL::
# default
- engine = create_engine('mysql://scott:tiger@localhost/foo')
+ engine = create_engine("mysql://scott:tiger@localhost/foo")
# mysqlclient (a maintained fork of MySQL-Python)
- engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
+ engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo")
# PyMySQL
- engine = create_engine('mysql+pymysql://scott:tiger@localhost/foo')
+ engine = create_engine("mysql+pymysql://scott:tiger@localhost/foo")
More notes on connecting to MySQL at :ref:`mysql_toplevel`.
The Oracle dialect uses cx_oracle as the default DBAPI::
- engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
+ engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname")
- engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname')
+ engine = create_engine("oracle+cx_oracle://scott:tiger@tnsname")
More notes on connecting to Oracle at :ref:`oracle_toplevel`.
also available::
# pyodbc
- engine = create_engine('mssql+pyodbc://scott:tiger@mydsn')
+ engine = create_engine("mssql+pyodbc://scott:tiger@mydsn")
# pymssql
- engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname')
+ engine = create_engine("mssql+pymssql://scott:tiger@hostname:port/dbname")
More notes on connecting to SQL Server at :ref:`mssql_toplevel`.
# sqlite://<nohostname>/<path>
# where <path> is relative:
- engine = create_engine('sqlite:///foo.db')
+ engine = create_engine("sqlite:///foo.db")
And for an absolute file path, the three slashes are followed by the absolute path::
# Unix/Mac - 4 initial slashes in total
- engine = create_engine('sqlite:////absolute/path/to/foo.db')
+ engine = create_engine("sqlite:////absolute/path/to/foo.db")
# Windows
- engine = create_engine('sqlite:///C:\\path\\to\\foo.db')
+ engine = create_engine("sqlite:///C:\\path\\to\\foo.db")
# Windows alternative using raw string
- engine = create_engine(r'sqlite:///C:\path\to\foo.db')
+ engine = create_engine(r"sqlite:///C:\path\to\foo.db")
To use a SQLite ``:memory:`` database, specify an empty URL::
- engine = create_engine('sqlite://')
+ engine = create_engine("sqlite://")
More notes on connecting to SQLite at :ref:`sqlite_toplevel`.
this is DBAPIs that accept an argument ``encoding`` for character encodings,
such as most MySQL DBAPIs::
- engine = create_engine(
- "mysql+pymysql://user:pass@host/test?charset=utf8mb4"
- )
+ engine = create_engine("mysql+pymysql://user:pass@host/test?charset=utf8mb4")
The advantage of using the query string is that additional DBAPI options may be
specified in configuration files in a manner that's portable to the DBAPI
method directly as follows::
>>> from sqlalchemy import create_engine
- >>> engine = create_engine("mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4")
+ >>> engine = create_engine(
+ ... "mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4"
+ ... )
>>> args, kwargs = engine.dialect.create_connect_args(engine.url)
>>> args, kwargs
([], {'host': 'some_host', 'database': 'test', 'user': 'some_user', 'password': 'some_pass', 'charset': 'utf8mb4', 'client_flag': 2})
engine = create_engine(
"postgresql+psycopg2://user:pass@hostname/dbname",
- connect_args={"connection_factory": MyConnectionFactory}
+ connect_args={"connection_factory": MyConnectionFactory},
)
Another example is the pyodbc "timeout" parameter::
engine = create_engine(
- "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server",
- connect_args={"timeout": 30}
+ "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server",
+ connect_args={"timeout": 30},
)
The above example also illustrates that both URL "query string" parameters as
engine = create_engine("postgresql+psycopg2://user:pass@hostname/dbname")
+
@event.listens_for(engine, "do_connect")
def receive_do_connect(dialect, conn_rec, cargs, cparams):
- cparams['connection_factory'] = MyConnectionFactory
+ cparams["connection_factory"] = MyConnectionFactory
.. _engines_dynamic_tokens:
engine = create_engine("postgresql+psycopg2://user@hostname/dbname")
+
@event.listens_for(engine, "do_connect")
def provide_token(dialect, conn_rec, cargs, cparams):
- cparams['token'] = get_authentication_token()
+ cparams["token"] = get_authentication_token()
.. seealso::
from sqlalchemy import event
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/dbname"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/dbname")
+
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
cursor_obj.execute("SET some session variables")
cursor_obj.close()
-
Fully Replacing the DBAPI ``connect()`` function
------------------------------------------------
from sqlalchemy import event
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/dbname"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/dbname")
+
@event.listens_for(engine, "do_connect")
def receive_do_connect(dialect, conn_rec, cargs, cparams):
import logging
logging.basicConfig()
- logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
+ logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
By default, the log level is set to ``logging.WARN`` within the entire
``sqlalchemy`` namespace so that no log operations occur, even within an
>>> from sqlalchemy import create_engine, text
- >>> e = create_engine("sqlite://", echo=True, echo_pool='debug')
+ >>> e = create_engine("sqlite://", echo=True, echo_pool="debug")
>>> with e.connect() as conn:
- ... print(conn.scalar(text("select 'hi'")))
- ...
+ ... print(conn.scalar(text("select 'hi'")))
2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Created new connection <sqlite3.Connection object at 0x7f287819ac60>
2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Connection <sqlite3.Connection object at 0x7f287819ac60> checked out from pool
2020-10-24 12:54:57,702 INFO sqlalchemy.engine.Engine select 'hi'
Use of these flags is roughly equivalent to::
import logging
+
logging.basicConfig()
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
>>> from sqlalchemy import create_engine
>>> from sqlalchemy import text
- >>> e = create_engine("sqlite://", echo=True, logging_name='myengine')
+ >>> e = create_engine("sqlite://", echo=True, logging_name="myengine")
>>> with e.connect() as conn:
... conn.execute(text("select 'hi'"))
- ...
2020-10-24 12:47:04,291 INFO sqlalchemy.engine.Engine.myengine select 'hi'
2020-10-24 12:47:04,292 INFO sqlalchemy.engine.Engine.myengine ()
>>> e = create_engine("sqlite://", echo=True, hide_parameters=True)
>>> with e.connect() as conn:
... conn.execute(text("select :some_private_name"), {"some_private_name": "pii"})
- ...
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine select ?
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine [SQL parameters hidden due to hide_parameters=True]
from sqlalchemy.event import listen
from sqlalchemy.pool import Pool
+
def my_on_connect(dbapi_con, connection_record):
print("New DBAPI connection:", dbapi_con)
- listen(Pool, 'connect', my_on_connect)
+
+ listen(Pool, "connect", my_on_connect)
To listen with the :func:`.listens_for` decorator looks like::
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
+
@listens_for(Pool, "connect")
def my_on_connect(dbapi_con, connection_record):
print("New DBAPI connection:", dbapi_con)
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
+
@listens_for(Pool, "connect", named=True)
def my_on_connect(**kw):
- print("New DBAPI connection:", kw['dbapi_connection'])
+ print("New DBAPI connection:", kw["dbapi_connection"])
When using named argument passing, the names listed in the function argument
specification will be used as keys in the dictionary.
from sqlalchemy.event import listens_for
from sqlalchemy.pool import Pool
+
@listens_for(Pool, "connect", named=True)
def my_on_connect(dbapi_connection, **kw):
print("New DBAPI connection:", dbapi_connection)
- print("Connection record:", kw['connection_record'])
+ print("Connection record:", kw["connection_record"])
Above, the presence of ``**kw`` tells :func:`.listens_for` that
arguments should be passed to the function by name, rather than positionally.
from sqlalchemy.engine import Engine
import psycopg2
+
def connect():
- return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test')
+ return psycopg2.connect(user="ed", host="127.0.0.1", dbname="test")
+
my_pool = QueuePool(connect)
- my_engine = create_engine('postgresql+psycopg2://ed@localhost/test')
+ my_engine = create_engine("postgresql+psycopg2://ed@localhost/test")
# associate listener with all instances of Pool
- listen(Pool, 'connect', my_on_connect)
+ listen(Pool, "connect", my_on_connect)
# associate listener with all instances of Pool
# via the Engine class
- listen(Engine, 'connect', my_on_connect)
+ listen(Engine, "connect", my_on_connect)
# associate listener with my_pool
- listen(my_pool, 'connect', my_on_connect)
+ listen(my_pool, "connect", my_on_connect)
# associate listener with my_engine.pool
- listen(my_engine, 'connect', my_on_connect)
-
+ listen(my_engine, "connect", my_on_connect)
.. _event_modifiers:
def validate_phone(target, value, oldvalue, initiator):
"""Strip non-numeric characters from a phone number"""
- return re.sub(r'\D', '', value)
+ return re.sub(r"\D", "", value)
+
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
- listen(UserContact.phone, 'set', validate_phone, retval=True)
+ listen(UserContact.phone, "set", validate_phone, retval=True)
Event Reference
---------------
automatically. They are invoked in the same way as any other member of the
:data:`_sql.func` namespace::
- select(func.count('*')).select_from(some_table)
+ select(func.count("*")).select_from(some_table)
Note that any name not known to :data:`_sql.func` generates the function name
as is - there is no restriction on what SQL functions can be called, known or
.. toctree::
:hidden:
- tutorial
\ No newline at end of file
+ tutorial
from sqlalchemy import Table, Column, Integer, String
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('user_id', Integer, primary_key=True),
- Column('user_name', String(16), nullable=False),
- Column('email_address', String(60)),
- Column('nickname', String(50), nullable=False)
+ Column("user_id", Integer, primary_key=True),
+ Column("user_name", String(16), nullable=False),
+ Column("email_address", String(60)),
+ Column("nickname", String(50), nullable=False),
)
Above, a table called ``user`` is described, which contains four columns. The
references)::
>>> for t in metadata_obj.sorted_tables:
- ... print(t.name)
+ ... print(t.name)
user
user_preference
invoice
accessors which allow inspection of its properties. Given the following
:class:`~sqlalchemy.schema.Table` definition::
- employees = Table('employees', metadata_obj,
- Column('employee_id', Integer, primary_key=True),
- Column('employee_name', String(60), nullable=False),
- Column('employee_dept', Integer, ForeignKey("departments.department_id"))
+ employees = Table(
+ "employees",
+ metadata_obj,
+ Column("employee_id", Integer, primary_key=True),
+ Column("employee_name", String(60), nullable=False),
+ Column("employee_dept", Integer, ForeignKey("departments.department_id")),
)
Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table -
employees.c.employee_id
# via string
- employees.c['employee_id']
+ employees.c["employee_id"]
# a tuple of columns may be returned using multiple strings
# (new in 2.0)
- emp_id, name, type = employees.c['employee_id', "name", "type"]
+ emp_id, name, type = employees.c["employee_id", "name", "type"]
# iterate through all columns
for c in employees.c:
metadata_obj = MetaData()
financial_info = Table(
- 'financial_info',
+ "financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
- schema='remote_banks'
+ Column("id", Integer, primary_key=True),
+ Column("value", String(100), nullable=False),
+ schema="remote_banks",
)
SQL that is rendered using this :class:`_schema.Table`, such as the SELECT
in the :attr:`_schema.MetaData.tables` collection by searching for the
key ``'remote_banks.financial_info'``::
- >>> metadata_obj.tables['remote_banks.financial_info']
+ >>> metadata_obj.tables["remote_banks.financial_info"]
Table('financial_info', MetaData(),
Column('id', Integer(), table=<financial_info>, primary_key=True, nullable=False),
Column('value', String(length=100), table=<financial_info>, nullable=False),
customer = Table(
"customer",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('financial_info_id', ForeignKey("remote_banks.financial_info.id")),
- schema='remote_banks'
+ Column("id", Integer, primary_key=True),
+ Column("financial_info_id", ForeignKey("remote_banks.financial_info.id")),
+ schema="remote_banks",
)
The :paramref:`_schema.Table.schema` argument may also be used with certain
dotted "database/owner" tokens. The tokens may be placed directly in the name
at once, such as::
- schema="dbo.scott"
+ schema = "dbo.scott"
.. seealso::
metadata_obj = MetaData(schema="remote_banks")
financial_info = Table(
- 'financial_info',
+ "financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
+ Column("id", Integer, primary_key=True),
+ Column("value", String(100), nullable=False),
)
Above, for any :class:`_schema.Table` object (or :class:`_schema.Sequence` object
includes that the :class:`_schema.Table` is cataloged in the :class:`_schema.MetaData`
using the schema-qualified name, that is::
- metadata_obj.tables['remote_banks.financial_info']
+ metadata_obj.tables["remote_banks.financial_info"]
When using the :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint`
objects to refer to this table, either the schema-qualified name or the
# either will work:
refers_to_financial_info = Table(
- 'refers_to_financial_info',
+ "refers_to_financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('fiid', ForeignKey('financial_info.id')),
+ Column("id", Integer, primary_key=True),
+ Column("fiid", ForeignKey("financial_info.id")),
)
# or
refers_to_financial_info = Table(
- 'refers_to_financial_info',
+ "refers_to_financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('fiid', ForeignKey('remote_banks.financial_info.id')),
+ Column("id", Integer, primary_key=True),
+ Column("fiid", ForeignKey("remote_banks.financial_info.id")),
)
When using a :class:`_schema.MetaData` object that sets
metadata_obj = MetaData(schema="remote_banks")
financial_info = Table(
- 'financial_info',
+ "financial_info",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
- schema=BLANK_SCHEMA # will not use "remote_banks"
+ Column("id", Integer, primary_key=True),
+ Column("value", String(100), nullable=False),
+ schema=BLANK_SCHEMA, # will not use "remote_banks"
)
.. seealso::
engine = create_engine("oracle+cx_oracle://scott:tiger@tsn_name")
+
@event.listens_for(engine, "connect", insert=True)
def set_current_schema(dbapi_connection, connection_record):
cursor_obj = dbapi_connection.cursor()
"InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using
``mysql_engine``::
- addresses = Table('engine_email_addresses', metadata_obj,
- Column('address_id', Integer, primary_key=True),
- Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
- Column('email_address', String(20)),
- mysql_engine='InnoDB'
+ addresses = Table(
+ "engine_email_addresses",
+ metadata_obj,
+ Column("address_id", Integer, primary_key=True),
+ Column("remote_user_id", Integer, ForeignKey(users.c.user_id)),
+ Column("email_address", String(20)),
+ mysql_engine="InnoDB",
)
Other backends may support table-level options as well - these would be
>>> user_table = Table(
... "user_account",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30)),
- ... Column('fullname', String)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30)),
+ ... Column("fullname", String),
... )
>>> from sqlalchemy import ForeignKey
>>> address_table = Table(
... "address",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', None, ForeignKey('user_account.id')),
- ... Column('email_address', String, nullable=False)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", None, ForeignKey("user_account.id")),
+ ... Column("email_address", String, nullable=False),
... )
>>> metadata_obj.create_all(engine)
BEGIN (implicit)
>>> Base = declarative_base()
>>> from sqlalchemy.orm import relationship
>>> class User(Base):
- ... __tablename__ = 'user_account'
- ...
+ ... __tablename__ = "user_account"
+ ...
... id = Column(Integer, primary_key=True)
... name = Column(String(30))
... fullname = Column(String)
- ...
+ ...
... addresses = relationship("Address", back_populates="user")
- ...
+ ...
... def __repr__(self):
- ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
+ ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
>>> class Address(Base):
- ... __tablename__ = 'address'
- ...
+ ... __tablename__ = "address"
+ ...
... id = Column(Integer, primary_key=True)
... email_address = Column(String, nullable=False)
- ... user_id = Column(Integer, ForeignKey('user_account.id'))
- ...
+ ... user_id = Column(Integer, ForeignKey("user_account.id"))
+ ...
... user = relationship("User", back_populates="addresses")
- ...
+ ...
... def __repr__(self):
... return f"Address(id={self.id!r}, email_address={self.email_address!r})"
>>> conn = engine.connect()
>>> from sqlalchemy.orm import Session
>>> session = Session(conn)
- >>> session.add_all([
- ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[
- ... Address(email_address="spongebob@sqlalchemy.org")
- ... ]),
- ... User(name="sandy", fullname="Sandy Cheeks", addresses=[
- ... Address(email_address="sandy@sqlalchemy.org"),
- ... Address(email_address="squirrel@squirrelpower.org")
- ... ]),
- ... User(name="patrick", fullname="Patrick Star", addresses=[
- ... Address(email_address="pat999@aol.com")
- ... ]),
- ... User(name="squidward", fullname="Squidward Tentacles", addresses=[
- ... Address(email_address="stentcl@sqlalchemy.org")
- ... ]),
- ... User(name="ehkrabs", fullname="Eugene H. Krabs"),
- ... ])
+ >>> session.add_all(
+ ... [
+ ... User(
+ ... name="spongebob",
+ ... fullname="Spongebob Squarepants",
+ ... addresses=[Address(email_address="spongebob@sqlalchemy.org")],
+ ... ),
+ ... User(
+ ... name="sandy",
+ ... fullname="Sandy Cheeks",
+ ... addresses=[
+ ... Address(email_address="sandy@sqlalchemy.org"),
+ ... Address(email_address="squirrel@squirrelpower.org"),
+ ... ],
+ ... ),
+ ... User(
+ ... name="patrick",
+ ... fullname="Patrick Star",
+ ... addresses=[Address(email_address="pat999@aol.com")],
+ ... ),
+ ... User(
+ ... name="squidward",
+ ... fullname="Squidward Tentacles",
+ ... addresses=[Address(email_address="stentcl@sqlalchemy.org")],
+ ... ),
+ ... User(name="ehkrabs", fullname="Eugene H. Krabs"),
+ ... ]
+ ... )
>>> session.commit()
BEGIN ...
>>> conn.begin()
* :meth:`_sql.ColumnOperators.__eq__` (Python "``==``" operator)::
- >>> print(column('x') == 5)
+ >>> print(column("x") == 5)
x = :x_1
..
* :meth:`_sql.ColumnOperators.__ne__` (Python "``!=``" operator)::
- >>> print(column('x') != 5)
+ >>> print(column("x") != 5)
x != :x_1
..
* :meth:`_sql.ColumnOperators.__gt__` (Python "``>``" operator)::
- >>> print(column('x') > 5)
+ >>> print(column("x") > 5)
x > :x_1
..
* :meth:`_sql.ColumnOperators.__lt__` (Python "``<``" operator)::
- >>> print(column('x') < 5)
+ >>> print(column("x") < 5)
x < :x_1
..
* :meth:`_sql.ColumnOperators.__ge__` (Python "``>=``" operator)::
- >>> print(column('x') >= 5)
+ >>> print(column("x") >= 5)
x >= :x_1
..
* :meth:`_sql.ColumnOperators.__le__` (Python "``<=``" operator)::
- >>> print(column('x') <= 5)
+ >>> print(column("x") <= 5)
x <= :x_1
..
* :meth:`_sql.ColumnOperators.between`::
- >>> print(column('x').between(5, 10))
+ >>> print(column("x").between(5, 10))
x BETWEEN :x_1 AND :x_2
..
values to the :meth:`_sql.ColumnOperators.in_` method::
- >>> print(column('x').in_([1, 2, 3]))
+ >>> print(column("x").in_([1, 2, 3]))
x IN (__[POSTCOMPILE_x_1])
The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters
"NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator::
- >>> print(column('x').not_in([1, 2, 3]))
+ >>> print(column("x").not_in([1, 2, 3]))
(x NOT IN (__[POSTCOMPILE_x_1]))
This is typically more easily available by negating with the ``~`` operator::
- >>> print(~column('x').in_([1, 2, 3]))
+ >>> print(~column("x").in_([1, 2, 3]))
(x NOT IN (__[POSTCOMPILE_x_1]))
Tuple IN Expressions
then receives a list of tuples::
>>> from sqlalchemy import tuple_
- >>> tup = tuple_(column('x', Integer), column('y', Integer))
+ >>> tup = tuple_(column("x", Integer), column("y", Integer))
>>> expr = tup.in_([(1, 2), (3, 4)])
>>> print(expr)
(x, y) IN (__[POSTCOMPILE_param_1])
construct is passed in directly, without any explicit conversion to a named
subquery::
- >>> print(column('x').in_(select(user_table.c.id)))
+ >>> print(column("x").in_(select(user_table.c.id)))
x IN (SELECT user_account.id
FROM user_account)
Tuples work as expected::
>>> print(
- ... tuple_(column('x'), column('y')).in_(
+ ... tuple_(column("x"), column("y")).in_(
... select(user_table.c.id, address_table.c.id).join(address_table)
... )
... )
as "<expr> IS NULL". The ``NULL`` constant is most easily acquired
using regular Python ``None``::
- >>> print(column('x').is_(None))
+ >>> print(column("x").is_(None))
x IS NULL
SQL NULL is also explicitly available, if needed, using the
:func:`_sql.null` construct::
>>> from sqlalchemy import null
- >>> print(column('x').is_(null()))
+ >>> print(column("x").is_(null()))
x IS NULL
The :meth:`_sql.ColumnOperators.is_` operator is automatically invoked when
explicitly, particularly when used with a dynamic value::
>>> a = None
- >>> print(column('x') == a)
+ >>> print(column("x") == a)
x IS NULL
Note that the Python ``is`` operator is **not overloaded**. Even though
Similar to :meth:`_sql.ColumnOperators.is_`, produces "IS NOT"::
- >>> print(column('x').is_not(None))
+ >>> print(column("x").is_not(None))
x IS NOT NULL
Is similarly equivalent to ``!= None``::
- >>> print(column('x') != None)
+ >>> print(column("x") != None)
x IS NOT NULL
* :meth:`_sql.ColumnOperators.is_distinct_from`:
Produces SQL IS DISTINCT FROM::
- >>> print(column('x').is_distinct_from('some value'))
+ >>> print(column("x").is_distinct_from("some value"))
x IS DISTINCT FROM :x_1
* :meth:`_sql.ColumnOperators.isnot_distinct_from`:
Produces SQL IS NOT DISTINCT FROM::
- >>> print(column('x').isnot_distinct_from('some value'))
+ >>> print(column("x").isnot_distinct_from("some value"))
x IS NOT DISTINCT FROM :x_1
String Comparisons
* :meth:`_sql.ColumnOperators.like`::
- >>> print(column('x').like('word'))
+ >>> print(column("x").like("word"))
x LIKE :x_1
..
Case insensitive LIKE makes use of the SQL ``lower()`` function on a
generic backend. On the PostgreSQL backend it will use ``ILIKE``::
- >>> print(column('x').ilike('word'))
+ >>> print(column("x").ilike("word"))
lower(x) LIKE lower(:x_1)
..
* :meth:`_sql.ColumnOperators.notlike`::
- >>> print(column('x').notlike('word'))
+ >>> print(column("x").notlike("word"))
x NOT LIKE :x_1
..
* :meth:`_sql.ColumnOperators.notilike`::
- >>> print(column('x').notilike('word'))
+ >>> print(column("x").notilike("word"))
lower(x) NOT LIKE lower(:x_1)
..
* :meth:`_sql.ColumnOperators.startswith`::
The string containment operators
- >>> print(column('x').startswith('word'))
+ >>> print(column("x").startswith("word"))
x LIKE :x_1 || '%'
..
* :meth:`_sql.ColumnOperators.endswith`::
- >>> print(column('x').endswith('word'))
+ >>> print(column("x").endswith("word"))
x LIKE '%' || :x_1
..
* :meth:`_sql.ColumnOperators.contains`::
- >>> print(column('x').contains('word'))
+ >>> print(column("x").contains("word"))
x LIKE '%' || :x_1 || '%'
..
This is a dialect-specific operator that makes use of the MATCH
feature of the underlying database, if available::
- >>> print(column('x').match('word'))
+ >>> print(column("x").match("word"))
x MATCH :x_1
..
for example the PostgreSQL dialect::
>>> from sqlalchemy.dialects import postgresql
- >>> print(column('x').regexp_match('word').compile(dialect=postgresql.dialect()))
+ >>> print(column("x").regexp_match("word").compile(dialect=postgresql.dialect()))
x ~ %(x_1)s
Or MySQL::
>>> from sqlalchemy.dialects import mysql
- >>> print(column('x').regexp_match('word').compile(dialect=mysql.dialect()))
+ >>> print(column("x").regexp_match("word").compile(dialect=mysql.dialect()))
x REGEXP %s
..
String concatenation::
- >>> print(column('x').concat("some string"))
+ >>> print(column("x").concat("some string"))
x || :x_1
This operator is available via :meth:`_sql.ColumnOperators.__add__`, that
is, the Python ``+`` operator, when working with a column expression that
derives from :class:`_types.String`::
- >>> print(column('x', String) + "some string")
+ >>> print(column("x", String) + "some string")
x || :x_1
The operator will produce the appropriate database-specific construct,
such as on MySQL, where it has historically been the ``concat()`` SQL function::
- >>> print((column('x', String) + "some string").compile(dialect=mysql.dialect()))
+ >>> print((column("x", String) + "some string").compile(dialect=mysql.dialect()))
concat(x, %s)
..
Complementary to :meth:`_sql.ColumnOperators.regexp` this produces REGEXP
REPLACE equivalent for the backends which support it::
- >>> print(column('x').regexp_replace('foo', 'bar').compile(dialect=postgresql.dialect()))
+ >>> print(column("x").regexp_replace("foo", "bar").compile(dialect=postgresql.dialect()))
REGEXP_REPLACE(x, %(x_1)s, %(x_2)s)
..
Produces the COLLATE SQL operator which provides for specific collations
at expression time::
- >>> print((column('x').collate('latin1_german2_ci') == 'Müller').compile(dialect=mysql.dialect()))
+ >>> print(
+ ... (column("x").collate("latin1_german2_ci") == "Müller").compile(
+ ... dialect=mysql.dialect()
+ ... )
+ ... )
(x COLLATE latin1_german2_ci) = %s
>>> from sqlalchemy import literal
- >>> print((literal('Müller').collate('latin1_german2_ci') == column('x')).compile(dialect=mysql.dialect()))
+ >>> print(
+ ... (literal("Müller").collate("latin1_german2_ci") == column("x")).compile(
+ ... dialect=mysql.dialect()
+ ... )
+ ... )
(%s COLLATE latin1_german2_ci) = x
..
* :meth:`_sql.ColumnOperators.__add__`, :meth:`_sql.ColumnOperators.__radd__` (Python "``+``" operator)::
- >>> print(column('x') + 5)
+ >>> print(column("x") + 5)
x + :x_1
- >>> print(5 + column('x'))
+ >>> print(5 + column("x"))
:x_1 + x
..
* :meth:`_sql.ColumnOperators.__sub__`, :meth:`_sql.ColumnOperators.__rsub__` (Python "``-``" operator)::
- >>> print(column('x') - 5)
+ >>> print(column("x") - 5)
x - :x_1
- >>> print(5 - column('x'))
+ >>> print(5 - column("x"))
:x_1 - x
..
* :meth:`_sql.ColumnOperators.__mul__`, :meth:`_sql.ColumnOperators.__rmul__` (Python "``*``" operator)::
- >>> print(column('x') * 5)
+ >>> print(column("x") * 5)
x * :x_1
- >>> print(5 * column('x'))
+ >>> print(5 * column("x"))
:x_1 * x
..
* :meth:`_sql.ColumnOperators.__truediv__`, :meth:`_sql.ColumnOperators.__rtruediv__` (Python "``/``" operator).
This is the Python ``truediv`` operator, which will ensure integer true division occurs::
- >>> print(column('x') / 5)
+ >>> print(column("x") / 5)
x / CAST(:x_1 AS NUMERIC)
- >>> print(5 / column('x'))
+ >>> print(5 / column("x"))
:x_1 / CAST(x AS NUMERIC)
.. versionchanged:: 2.0 The Python ``/`` operator now ensures integer true division takes place
For the default backend as well as backends such as PostgreSQL, the SQL ``/`` operator normally
behaves this way for integer values::
- >>> print(column('x') // 5)
+ >>> print(column("x") // 5)
x / :x_1
- >>> print(5 // column('x', Integer))
+ >>> print(5 // column("x", Integer))
:x_1 / x
For backends that don't use floor division by default, or when used with numeric values,
the FLOOR() function is used to ensure floor division::
- >>> print(column('x') // 5.5)
+ >>> print(column("x") // 5.5)
FLOOR(x / :x_1)
- >>> print(5 // column('x', Numeric))
+ >>> print(5 // column("x", Numeric))
FLOOR(:x_1 / x)
.. versionadded:: 2.0 Support for FLOOR division
* :meth:`_sql.ColumnOperators.__mod__`, :meth:`_sql.ColumnOperators.__rmod__` (Python "``%``" operator)::
- >>> print(column('x') % 5)
+ >>> print(column("x") % 5)
x % :x_1
- >>> print(5 % column('x'))
+ >>> print(5 % column("x"))
:x_1 % x
..
:meth:`_sql.Update.where` and :meth:`_sql.Delete.where`::
>>> print(
- ... select(address_table.c.email_address).
- ... where(user_table.c.name == 'squidward').
- ... where(address_table.c.user_id == user_table.c.id)
- ... )
+ ... select(address_table.c.email_address)
+ ... .where(user_table.c.name == "squidward")
+ ... .where(address_table.c.user_id == user_table.c.id)
+ ... )
SELECT address.email_address
FROM address, user_account
WHERE user_account.name = :name_1 AND address.user_id = user_account.id
:meth:`_sql.Select.where`, :meth:`_sql.Update.where` and :meth:`_sql.Delete.where` also accept multiple expressions with the same effect::
>>> print(
- ... select(address_table.c.email_address).
- ... where(
- ... user_table.c.name == 'squidward',
- ... address_table.c.user_id == user_table.c.id
- ... )
- ... )
+ ... select(address_table.c.email_address).where(
+ ... user_table.c.name == "squidward",
+ ... address_table.c.user_id == user_table.c.id,
+ ... )
+ ... )
SELECT address.email_address
FROM address, user_account
WHERE user_account.name = :name_1 AND address.user_id = user_account.id
>>> from sqlalchemy import and_, or_
>>> print(
- ... select(address_table.c.email_address).
- ... where(
+ ... select(address_table.c.email_address).where(
... and_(
- ... or_(user_table.c.name == 'squidward', user_table.c.name == 'sandy'),
- ... address_table.c.user_id == user_table.c.id
+ ... or_(user_table.c.name == "squidward", user_table.c.name == "sandy"),
+ ... address_table.c.user_id == user_table.c.id,
... )
... )
... )
typically invert the operator in a boolean expression::
>>> from sqlalchemy import not_
- >>> print(not_(column('x') == 5))
+ >>> print(not_(column("x") == 5))
x != :x_1
It also may apply a keyword such as ``NOT`` when appropriate::
>>> from sqlalchemy import Boolean
- >>> print(not_(column('x', Boolean)))
+ >>> print(not_(column("x", Boolean)))
NOT x
The Python binary ``&`` operator is overloaded to behave the same
as :func:`_sql.and_` (note parentheses around the two operands)::
- >>> print((column('x') == 5) & (column('y') == 10))
+ >>> print((column("x") == 5) & (column("y") == 10))
x = :x_1 AND y = :y_1
..
The Python binary ``|`` operator is overloaded to behave the same
as :func:`_sql.or_` (note parentheses around the two operands)::
- >>> print((column('x') == 5) | (column('y') == 10))
+ >>> print((column("x") == 5) | (column("y") == 10))
x = :x_1 OR y = :y_1
..
as :func:`_sql.not_`, either inverting the existing operator, or
applying the ``NOT`` keyword to the expression as a whole::
- >>> print(~(column('x') == 5))
+ >>> print(~(column("x") == 5))
x != :x_1
>>> from sqlalchemy import Boolean
- >>> print(~column('x', Boolean))
+ >>> print(~column("x", Boolean))
NOT x
..
.. Setup code, not for display
>>> conn.close()
- ROLLBACK
\ No newline at end of file
+ ROLLBACK
``pool_size``, ``max_overflow``, ``pool_recycle`` and
``pool_timeout``. For example::
- engine = create_engine('postgresql+psycopg2://me@localhost/mydb',
- pool_size=20, max_overflow=0)
+ engine = create_engine(
+ "postgresql+psycopg2://me@localhost/mydb", pool_size=20, max_overflow=0
+ )
All SQLAlchemy pool implementations have in common
that none of them "pre create" connections - all implementations wait
the :class:`.NullPool` implementation::
from sqlalchemy.pool import NullPool
+
engine = create_engine(
- 'postgresql+psycopg2://scott:tiger@localhost/test',
- poolclass=NullPool)
+ "postgresql+psycopg2://scott:tiger@localhost/test", poolclass=NullPool
+ )
Using a Custom Connection Function
----------------------------------
import sqlalchemy.pool as pool
import psycopg2
+
def getconn():
- c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test')
+ c = psycopg2.connect(user="ed", host="127.0.0.1", dbname="test")
return c
+
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5)
DBAPI connections can then be procured from the pool using the
some_engine = create_engine(...)
+
@event.listens_for(some_engine, "engine_connect")
def ping_connection(connection, branch):
if branch:
illustrated by the code example below::
from sqlalchemy import create_engine, exc
+
e = create_engine(...)
c = e.connect()
period of time::
from sqlalchemy import create_engine
+
e = create_engine("mysql+mysqldb://scott:tiger@localhost/test", pool_recycle=3600)
Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced,
basically whether or not it's desirable for the pool to keep a full set of
connections ready to go even during idle periods::
- engine = create_engine(
- "postgreql://", pool_use_lifo=True, pool_pre_ping=True)
+ engine = create_engine("postgreql://", pool_use_lifo=True, pool_pre_ping=True)
Above, we also make use of the :paramref:`_sa.create_engine.pool_pre_ping` flag
so that connections which are closed from the server side are gracefully
more than once::
from sqlalchemy.pool import NullPool
- engine = create_engine("mysql+mysqldb://user:pass@host/dbname", poolclass=NullPool)
+ engine = create_engine("mysql+mysqldb://user:pass@host/dbname", poolclass=NullPool)
2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`,
passing the :paramref:`.Engine.dispose.close` parameter with a value of
engine = create_engine("...")
+
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
- connection_record.info['pid'] = os.getpid()
+ connection_record.info["pid"] = os.getpid()
+
@event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
- if connection_record.info['pid'] != pid:
+ if connection_record.info["pid"] != pid:
connection_record.dbapi_connection = connection_proxy.dbapi_connection = None
raise exc.DisconnectionError(
- "Connection record belongs to pid %s, "
- "attempting to check out in pid %s" %
- (connection_record.info['pid'], pid)
+ "Connection record belongs to pid %s, "
+ "attempting to check out in pid %s" % (connection_record.info["pid"], pid)
)
Above, we use an approach similar to that described in
most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData`
object, and the ``autoload_with`` argument::
- >>> messages = Table('messages', metadata_obj, autoload_with=engine)
+ >>> messages = Table("messages", metadata_obj, autoload_with=engine)
>>> [c.name for c in messages.columns]
['message_id', 'message_name', 'date']
``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the
effect such that the ``shopping_carts`` table will also be loaded::
- >>> shopping_cart_items = Table('shopping_cart_items', metadata_obj, autoload_with=engine)
- >>> 'shopping_carts' in metadata_obj.tables:
+ >>> shopping_cart_items = Table("shopping_cart_items", metadata_obj, autoload_with=engine)
+ >>> "shopping_carts" in metadata_obj.tables
True
The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like"
already exists with the given name. Such as below, we can access the already
generated ``shopping_carts`` table just by naming it::
- shopping_carts = Table('shopping_carts', metadata_obj)
+ shopping_carts = Table("shopping_carts", metadata_obj)
Of course, it's a good idea to use ``autoload_with=engine`` with the above table
regardless. This is so that the table's attributes will be loaded if they have
tables; this is handy for specifying custom datatypes, constraints such as
primary keys that may not be configured within the database, etc.::
- >>> mytable = Table('mytable', metadata_obj,
- ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key
- ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode
- ... # additional Column objects which require no change are reflected normally
- ... autoload_with=some_engine)
+ >>> mytable = Table(
+ ... "mytable",
+ ... metadata_obj,
+ ... Column(
+ ... "id", Integer, primary_key=True
+ ... ), # override reflected 'id' to have primary key
+ ... Column("mydata", Unicode(50)), # override reflected 'mydata' to be Unicode
+ ... # additional Column objects which require no change are reflected normally
+ ... autoload_with=some_engine,
+ ... )
.. seealso::
Use the "override" technique for this, specifying explicitly those columns
which are part of the primary key or have foreign key constraints::
- my_view = Table("some_view", metadata,
- Column("view_id", Integer, primary_key=True),
- Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
- autoload_with=engine
+ my_view = Table(
+ "some_view",
+ metadata,
+ Column("view_id", Integer, primary_key=True),
+ Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
+ autoload_with=engine,
)
Reflecting All Tables at Once
metadata_obj = MetaData()
metadata_obj.reflect(bind=someengine)
- users_table = metadata_obj.tables['users']
- addresses_table = metadata_obj.tables['addresses']
+ users_table = metadata_obj.tables["users"]
+ addresses_table = metadata_obj.tables["addresses"]
``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database::
schema will be reflected, and they will be populated as schema-qualified
with that name::
- >>> metadata_obj.tables['project.messages']
+ >>> metadata_obj.tables["project.messages"]
Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
Similarly, an individual :class:`_schema.Table` object that includes the
database schema, overriding any default schema that may have been configured on the
owning :class:`_schema.MetaData` collection::
- >>> messages = Table('messages', metadata_obj, schema="project", autoload_with=someengine)
+ >>> messages = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
>>> messages
Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
>>> # reflect in non-schema qualified fashion
>>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine)
>>> # reflect in schema qualified fashion
- >>> messages_table_2 = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
+ >>> messages_table_2 = Table(
+ ... "messages", metadata_obj, schema="project", autoload_with=someengine
+ ... )
>>> # two different objects
>>> messages_table_1 is messages_table_2
False
qualified fashion::
>>> # reflect "messages" in a schema qualified fashion
- >>> messages_table_1 = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
+ >>> messages_table_1 = Table(
+ ... "messages", metadata_obj, schema="project", autoload_with=someengine
+ ... )
The above ``messages_table_1`` will refer to ``projects`` also in a schema
qualified fashion. This "projects" table will be reflected automatically by
>>> messages_table_1.c.project_id.references(projects_table_1.c.project_id)
False
- >>> it refers to this one
+ >>> # it refers to this one
>>> projects_table_2 = metadata_obj.tables["project.projects"]
>>> messages_table_1.c.project_id.references(projects_table_2.c.project_id)
True
- >>> they're different, as one non-schema qualified and the other one is
+ >>> # they're different, as one non-schema qualified and the other one is
>>> projects_table_1 is projects_table_2
False
from sqlalchemy import create_engine
from sqlalchemy import inspect
- engine = create_engine('...')
+
+ engine = create_engine("...")
insp = inspect(engine)
print(insp.get_table_names())
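The inspector provides many other methods as well; a brief sketch, using a
hypothetical table name::

    # "sometable" is an illustrative name; get_columns() and
    # get_foreign_keys() are standard inspector methods
    print(insp.get_columns("sometable"))
    print(insp.get_foreign_keys("sometable"))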
>>> metadata_obj = MetaData()
>>> @event.listens_for(metadata_obj, "column_reflect")
- >>> def genericize_datatypes(inspector, tablename, column_dict):
+ ... def genericize_datatypes(inspector, tablename, column_dict):
... column_dict["type"] = column_dict["type"].as_generic()
>>> my_generic_table = Table("my_table", metadata_obj, autoload_with=mysql_engine)
metadata_obj = MetaData()
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('user_name', String, primary_key=True),
- Column('email_address', String(60)),
+ Column("user_name", String, primary_key=True),
+ Column("email_address", String(60)),
)
When using a particular :class:`_types.TypeEngine` class in a
valid with SQL server are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.mssql import \
- BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \
- DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, JSON, MONEY, \
- NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \
- SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \
- TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR
+ from sqlalchemy.dialects.mssql import (
+ BIGINT,
+ BINARY,
+ BIT,
+ CHAR,
+ DATE,
+ DATETIME,
+ DATETIME2,
+ DATETIMEOFFSET,
+ DECIMAL,
+ FLOAT,
+ IMAGE,
+ INTEGER,
+ JSON,
+ MONEY,
+ NCHAR,
+ NTEXT,
+ NUMERIC,
+ NVARCHAR,
+ REAL,
+ SMALLDATETIME,
+ SMALLINT,
+ SMALLMONEY,
+ SQL_VARIANT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ TINYINT,
+ UNIQUEIDENTIFIER,
+ VARBINARY,
+ VARCHAR,
+ )
Types which are specific to SQL Server, or have SQL Server-specific
construction arguments, are as follows:
As with all SQLAlchemy dialects, all UPPERCASE types that are known to be
valid with MySQL are importable from the top level dialect::
- from sqlalchemy.dialects.mysql import \
- BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
- DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
- LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
- NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
- TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR
+ from sqlalchemy.dialects.mysql import (
+ BIGINT,
+ BINARY,
+ BIT,
+ BLOB,
+ BOOLEAN,
+ CHAR,
+ DATE,
+ DATETIME,
+ DECIMAL,
+ DOUBLE,
+ ENUM,
+ FLOAT,
+ INTEGER,
+ LONGBLOB,
+ LONGTEXT,
+ MEDIUMBLOB,
+ MEDIUMINT,
+ MEDIUMTEXT,
+ NCHAR,
+ NUMERIC,
+ NVARCHAR,
+ REAL,
+ SET,
+ SMALLINT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ TINYBLOB,
+ TINYINT,
+ TINYTEXT,
+ VARBINARY,
+ VARCHAR,
+ YEAR,
+ )
Types which are specific to MySQL, or have MySQL-specific
construction arguments, are as follows:
valid with Oracle are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.oracle import \
- BFILE, BLOB, CHAR, CLOB, DATE, \
- DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, NCHAR, \
- NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \
- VARCHAR2
+ from sqlalchemy.dialects.oracle import (
+ BFILE,
+ BLOB,
+ CHAR,
+ CLOB,
+ DATE,
+ DOUBLE_PRECISION,
+ FLOAT,
+ INTERVAL,
+ LONG,
+ NCLOB,
+ NCHAR,
+ NUMBER,
+ NVARCHAR,
+ NVARCHAR2,
+ RAW,
+ TIMESTAMP,
+ VARCHAR,
+ VARCHAR2,
+ )
.. versionadded:: 1.2.19 Added :class:`_types.NCHAR` to the list of datatypes
exported by the Oracle dialect.
from sqlalchemy import TypeDecorator
from sqlalchemy.dialects.postgresql import ARRAY
+
class ArrayOfEnum(TypeDecorator):
impl = ARRAY
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
- super_rp = super(ArrayOfEnum, self).result_processor(
- dialect, coltype)
+ super_rp = super(ArrayOfEnum, self).result_processor(dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
if value is None:
return None
return super_rp(handle_raw_string(value))
+
return process
E.g.::
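    # a sketch of the recipe in use; table, column and ENUM names here are
    # illustrative (ENUM imported from sqlalchemy.dialects.postgresql)
    Table(
        "data_table",
        metadata_obj,
        Column("data", ArrayOfEnum(ENUM("a", "b", "c", name="myenum"))),
    )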
E.g.::
Table(
- 'mydata', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', CastingArray(JSONB))
+ "mydata",
+ metadata,
+ Column("id", Integer, primary_key=True),
+ Column("data", CastingArray(JSONB)),
)
.. _postgresql_ranges:
E.g. an example of a fully typed model using the
:class:`_postgresql.TSRANGE` datatype::
- from datetime import datetime
+ from datetime import datetime
- from sqlalchemy.dialects.postgresql import Range
- from sqlalchemy.dialects.postgresql import TSRANGE
- from sqlalchemy.orm import DeclarativeBase
- from sqlalchemy.orm import Mapped
- from sqlalchemy.orm import mapped_column
+ from sqlalchemy.dialects.postgresql import Range
+ from sqlalchemy.dialects.postgresql import TSRANGE
+ from sqlalchemy.orm import DeclarativeBase
+ from sqlalchemy.orm import Mapped
+ from sqlalchemy.orm import mapped_column
+
+
+ class Base(DeclarativeBase):
+ pass
- class Base(DeclarativeBase):
- pass
- class RoomBooking(Base):
+ class RoomBooking(Base):
- __tablename__ = 'room_booking'
+ __tablename__ = "room_booking"
- id: Mapped[int] = mapped_column(primary_key=True)
- room: Mapped[str]
- during: Mapped[Range[datetime]] = mapped_column(TSRANGE)
+ id: Mapped[int] = mapped_column(primary_key=True)
+ room: Mapped[str]
+ during: Mapped[Range[datetime]] = mapped_column(TSRANGE)
To represent data for the ``during`` column above, the :class:`_postgresql.Range`
type is a simple dataclass that will represent the bounds of the range.
Below illustrates an INSERT of a row into the above ``room_booking`` table::
- from sqlalchemy import create_engine
- from sqlalchemy.orm import Session
+ from sqlalchemy import create_engine
+ from sqlalchemy.orm import Session
- engine = create_engine("postgresql+psycopg://scott:tiger@pg14/dbname")
+ engine = create_engine("postgresql+psycopg://scott:tiger@pg14/dbname")
- Base.metadata.create_all(engine)
+ Base.metadata.create_all(engine)
- with Session(engine) as session:
- booking = RoomBooking(
- room="101",
- during=Range(datetime(2013, 3, 23), datetime(2013, 3, 25))
- )
- session.add(booking)
- session.commit()
+ with Session(engine) as session:
+ booking = RoomBooking(
+ room="101", during=Range(datetime(2013, 3, 23), datetime(2013, 3, 25))
+ )
+ session.add(booking)
+ session.commit()
Selecting from any range column will also return :class:`_postgresql.Range`
objects as indicated::
- from sqlalchemy import select
+ from sqlalchemy import select
- with Session(engine) as session:
- for row in session.execute(select(RoomBooking.during)):
- print(row)
+ with Session(engine) as session:
+ for row in session.execute(select(RoomBooking.during)):
+ print(row)
The available range datatypes are as follows:
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
+
class EventCalendar(Base):
- __tablename__ = 'event_calendar'
+ __tablename__ = "event_calendar"
id: Mapped[int] = mapped_column(primary_key=True)
event_name: Mapped[str]
with Session(engine) as session:
calendar = EventCalendar(
event_name="SQLAlchemy Tutorial Sessions",
- in_session_periods= [
+ in_session_periods=[
Range(datetime(2013, 3, 23), datetime(2013, 3, 25)),
Range(datetime(2013, 4, 12), datetime(2013, 4, 15)),
Range(datetime(2013, 5, 9), datetime(2013, 5, 12)),
- ]
+ ],
)
session.add(calendar)
session.commit()
valid with PostgreSQL are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.postgresql import \
- ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \
- DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \
- INTERVAL, JSON, JSONB, MACADDR, MONEY, NUMERIC, OID, REAL, SMALLINT, TEXT, \
- TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \
- DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR
+ from sqlalchemy.dialects.postgresql import (
+ ARRAY,
+ BIGINT,
+ BIT,
+ BOOLEAN,
+ BYTEA,
+ CHAR,
+ CIDR,
+ DATE,
+ DOUBLE_PRECISION,
+ ENUM,
+ FLOAT,
+ HSTORE,
+ INET,
+ INTEGER,
+ INTERVAL,
+ JSON,
+ JSONB,
+ MACADDR,
+ MONEY,
+ NUMERIC,
+ OID,
+ REAL,
+ SMALLINT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ UUID,
+ VARCHAR,
+ INT4RANGE,
+ INT8RANGE,
+ NUMRANGE,
+ DATERANGE,
+ TSRANGE,
+ TSTZRANGE,
+ TSVECTOR,
+ )
Types which are specific to PostgreSQL, or have PostgreSQL-specific
construction arguments, are as follows:
For example::
- from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE
+ from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE
+
- class RoomBooking(Base):
+ class RoomBooking(Base):
- __tablename__ = 'room_booking'
+ __tablename__ = "room_booking"
- room = Column(Integer(), primary_key=True)
- during = Column(TSRANGE())
+ room = Column(Integer(), primary_key=True)
+ during = Column(TSRANGE())
- __table_args__ = (
- ExcludeConstraint(('room', '='), ('during', '&&')),
- )
+ __table_args__ = (ExcludeConstraint(("room", "="), ("during", "&&")),)
PostgreSQL DML Constructs
-------------------------
valid with SQLite are importable from the top level dialect, whether
they originate from :mod:`sqlalchemy.types` or from the local dialect::
- from sqlalchemy.dialects.sqlite import \
- BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \
- INTEGER, NUMERIC, JSON, SMALLINT, TEXT, TIME, TIMESTAMP, \
- VARCHAR
+ from sqlalchemy.dialects.sqlite import (
+ BLOB,
+ BOOLEAN,
+ CHAR,
+ DATE,
+ DATETIME,
+ DECIMAL,
+ FLOAT,
+ INTEGER,
+ NUMERIC,
+ JSON,
+ SMALLINT,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ VARCHAR,
+ )
.. module:: sqlalchemy.dialects.sqlite
directly, such as when we use ``print()``::
>>> from sqlalchemy import column
- >>> print(column('x') == 5)
+ >>> print(column("x") == 5)
x = :x_1
When the above SQL expression is stringified, the :class:`.StrSQLCompiler`
>>> from sqlalchemy.dialects.postgresql import insert
>>> from sqlalchemy import table, column
- >>> my_table = table('my_table', column('x'), column('y'))
- >>> insert_stmt = insert(my_table).values(x='foo')
- >>> insert_stmt = insert_stmt.on_conflict_do_nothing(
- ... index_elements=['y']
- ... )
+ >>> my_table = table("my_table", column("x"), column("y"))
+ >>> insert_stmt = insert(my_table).values(x="foo")
+ >>> insert_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["y"])
>>> print(insert_stmt)
Traceback (most recent call last):
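
One resolution (a sketch, reusing the dialect-specific compilation shown later
in this document) is to compile the statement against the PostgreSQL dialect
explicitly::

    from sqlalchemy.dialects import postgresql

    print(insert_stmt.compile(dialect=postgresql.dialect()))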
declarative such as::
class Bar(Base):
- __tablename__ = 'bar'
+ __tablename__ = "bar"
id = Column(Integer, primary_key=True)
cprop = deferred(Column(Integer))
- __table_args__ = (
- CheckConstraint(cprop > 5),
- )
+ __table_args__ = (CheckConstraint(cprop > 5),)
Above, the ``cprop`` attribute is used inline before it has been mapped,
however this ``cprop`` attribute is not a :class:`_schema.Column`,
:attr:`.ColumnProperty.expression` attribute::
class Bar(Base):
- __tablename__ = 'bar'
+ __tablename__ = "bar"
id = Column(Integer, primary_key=True)
cprop = deferred(Column(Integer))
- __table_args__ = (
- CheckConstraint(cprop.expression > 5),
- )
-
-
+ __table_args__ = (CheckConstraint(cprop.expression > 5),)
.. _error_cd3x:
implicitly or explicitly and does not provide a value when the statement
is executed::
- stmt = select(table.c.column).where(table.c.id == bindparam('my_param'))
+ stmt = select(table.c.column).where(table.c.id == bindparam("my_param"))
- result = conn.execute(stmt)
+ result = conn.execute(stmt)
Above, no value has been provided for the parameter "my_param". The correct
approach is to provide a value::
- result = conn.execute(stmt, my_param=12)
+ result = conn.execute(stmt, my_param=12)
When the message takes the form "a value is required for bind parameter <x>
in parameter group <y>", the message is referring to the "executemany" style
set of parameters in the list. As the second entry does not contain "b",
this error is generated::
- m = MetaData()
- t = Table(
- 't', m,
- Column('a', Integer),
- Column('b', Integer),
- Column('c', Integer)
- )
-
- e.execute(
- t.insert(), [
- {"a": 1, "b": 2, "c": 3},
- {"a": 2, "c": 4},
- {"a": 3, "b": 4, "c": 5},
- ]
- )
+ m = MetaData()
+ t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer))
+
+ e.execute(
+ t.insert(),
+ [
+ {"a": 1, "b": 2, "c": 3},
+ {"a": 2, "c": 4},
+ {"a": 3, "b": 4, "c": 5},
+ ],
+ )
+
+.. code-block::
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError)
A value is required for bind parameter 'b', in parameter group 1
Since "b" is required, pass it as ``None`` so that the INSERT may proceed::
- e.execute(
- t.insert(), [
- {"a": 1, "b": 2, "c": 3},
- {"a": 2, "b": None, "c": 4},
- {"a": 3, "b": 4, "c": 5},
- ]
- )
+ e.execute(
+ t.insert(),
+ [
+ {"a": 1, "b": 2, "c": 3},
+ {"a": 2, "b": None, "c": 4},
+ {"a": 3, "b": 4, "c": 5},
+ ],
+ )
.. seealso::
Given an example as::
m = MetaData()
- t = Table(
- 't', m,
- Column('a', Integer),
- Column('b', Integer),
- Column('c', Integer)
- )
+ t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer))
stmt = select(t)
Above, ``stmt`` represents a SELECT statement. The error is produced when we want
a1 = Address.__table__
- q = s.query(User).\
- join(a1, User.addresses).\
- filter(Address.email_address == 'ed@foo.com').all()
-
+ q = (
+ s.query(User)
+ .join(a1, User.addresses)
+ .filter(Address.email_address == "ed@foo.com")
+ .all()
+ )
The above pattern also allows an arbitrary selectable, such as
a Core :class:`_sql.Join` or :class:`_sql.Alias` object,
a1 = Address.__table__.alias()
- q = s.query(User).\
- join(a1, User.addresses).\
- filter(a1.c.email_address == 'ed@foo.com').all()
+ q = (
+ s.query(User)
+ .join(a1, User.addresses)
+ .filter(a1.c.email_address == "ed@foo.com")
+ .all()
+ )
The correct way to specify a join target is always by using the mapped
class itself or an :class:`_orm.aliased` object, in the latter case using the
:meth:`_orm.PropComparator.of_type` modifier to set up an alias::
# normal join to relationship entity
- q = s.query(User).\
- join(User.addresses).\
- filter(Address.email_address == 'ed@foo.com')
+ q = s.query(User).join(User.addresses).filter(Address.email_address == "ed@foo.com")
# name Address target explicitly, not necessary but legal
- q = s.query(User).\
- join(Address, User.addresses).\
- filter(Address.email_address == 'ed@foo.com')
+ q = (
+ s.query(User)
+ .join(Address, User.addresses)
+ .filter(Address.email_address == "ed@foo.com")
+ )
Join to an alias::
a1 = aliased(Address)
# of_type() form; recommended
- q = s.query(User).\
- join(User.addresses.of_type(a1)).\
- filter(a1.email_address == 'ed@foo.com')
+ q = (
+ s.query(User)
+ .join(User.addresses.of_type(a1))
+ .filter(a1.email_address == "ed@foo.com")
+ )
# target, onclause form
- q = s.query(User).\
- join(a1, User.addresses).\
- filter(a1.email_address == 'ed@foo.com')
-
+ q = s.query(User).join(a1, User.addresses).filter(a1.email_address == "ed@foo.com")
.. _error_xaj2:
of the join. For example given a joined inheritance mapping as::
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = Column(Integer, primary_key=True)
manager_id = Column(ForeignKey("manager.id"))
name = Column(String(50))
reports_to = relationship("Manager", foreign_keys=manager_id)
__mapper_args__ = {
- 'polymorphic_identity':'employee',
- 'polymorphic_on':type,
+ "polymorphic_identity": "employee",
+ "polymorphic_on": type,
}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
__mapper_args__ = {
- 'polymorphic_identity':'manager',
- 'inherit_condition': id == Employee.id
+ "polymorphic_identity": "manager",
+ "inherit_condition": id == Employee.id,
}
The above mapping includes a relationship between the ``Employee`` and
If we then wanted to use :func:`_orm.contains_eager` to populate the
``reports_to`` attribute, we refer to the alias::
- >>> stmt =select(Employee).join(
- ... Employee.reports_to.of_type(manager_alias)
- ... ).options(
- ... contains_eager(Employee.reports_to.of_type(manager_alias))
+ >>> stmt = (
+ ... select(Employee)
+ ... .join(Employee.reports_to.of_type(manager_alias))
+ ... .options(contains_eager(Employee.reports_to.of_type(manager_alias)))
... )
Without using the explicit :func:`_orm.aliased` object, in some more nested
# configuration step occurs
a = relationship("A", back_populates="bs", cascade="all, delete-orphan")
+
configure_mappers()
Above, the "delete-orphan" setting on ``B.a`` indicates the intent that
For the typical example that's missing
:paramref:`_orm.relationship.back_populates`, given the following mapping::
- class Parent(Base):
- __tablename__ = "parent"
- id = Column(Integer, primary_key=True)
- children = relationship("Child")
+ class Parent(Base):
+ __tablename__ = "parent"
+ id = Column(Integer, primary_key=True)
+ children = relationship("Child")
- class Child(Base):
- __tablename__ = "child"
- id = Column(Integer, primary_key=True)
- parent_id = Column(ForeignKey("parent.id"))
- parent = relationship("Parent")
+ class Child(Base):
+ __tablename__ = "child"
+ id = Column(Integer, primary_key=True)
+ parent_id = Column(ForeignKey("parent.id"))
+ parent = relationship("Parent")
The above mapping will generate warnings::
The solution is to apply :paramref:`_orm.relationship.back_populates`::
class Parent(Base):
- __tablename__ = "parent"
- id = Column(Integer, primary_key=True)
- children = relationship("Child", back_populates="parent")
+ __tablename__ = "parent"
+ id = Column(Integer, primary_key=True)
+ children = relationship("Child", back_populates="parent")
- class Child(Base):
- __tablename__ = "child"
- id = Column(Integer, primary_key=True)
- parent_id = Column(ForeignKey("parent.id"))
- parent = relationship("Parent", back_populates="children")
+ class Child(Base):
+ __tablename__ = "child"
+ id = Column(Integer, primary_key=True)
+ parent_id = Column(ForeignKey("parent.id"))
+ parent = relationship("Parent", back_populates="children")
For more customized relationships where an "overlap" situation may be
intentional and cannot be resolved, the :paramref:`_orm.relationship.overlaps`
:paramref:`_orm.relationship.primaryjoin` conditions that limit the related
items in each case::
- class Parent(Base):
- __tablename__ = "parent"
- id = Column(Integer, primary_key=True)
- c1 = relationship(
- "Child",
- primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)",
- backref="parent",
- overlaps="c2, parent"
- )
- c2 = relationship(
- "Child",
- primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)",
- overlaps="c1, parent"
- )
-
+ class Parent(Base):
+ __tablename__ = "parent"
+ id = Column(Integer, primary_key=True)
+ c1 = relationship(
+ "Child",
+ primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)",
+ backref="parent",
+ overlaps="c2, parent",
+ )
+ c2 = relationship(
+ "Child",
+ primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)",
+ overlaps="c1, parent",
+ )
- class Child(Base):
- __tablename__ = "child"
- id = Column(Integer, primary_key=True)
- parent_id = Column(ForeignKey("parent.id"))
- flag = Column(Integer)
+ class Child(Base):
+ __tablename__ = "child"
+ id = Column(Integer, primary_key=True)
+ parent_id = Column(ForeignKey("parent.id"))
+ flag = Column(Integer)
Above, the ORM will know that the overlap between ``Parent.c1``,
``Parent.c2`` and ``Child.parent`` is intentional.
# result internally pre-fetches all objects
result = sess.execute(
- select(User).where(User.id == 7),
- execution_options={"prebuffer_rows": True}
+ select(User).where(User.id == 7), execution_options={"prebuffer_rows": True}
)
# context manager is closed, so session_obj above is closed, identity
the :meth:`.Executable.execute` method directly off of a Core expression object
that is not associated with any :class:`_engine.Engine`::
- metadata_obj = MetaData()
- table = Table('t', metadata_obj, Column('q', Integer))
+ metadata_obj = MetaData()
+ table = Table("t", metadata_obj, Column("q", Integer))
- stmt = select(table)
- result = stmt.execute() # <--- raises
+ stmt = select(table)
+ result = stmt.execute() # <--- raises
What the logic is expecting is that the :class:`_schema.MetaData` object has
been **bound** to a :class:`_engine.Engine`::
- engine = create_engine("mysql+pymysql://user:pass@host/db")
- metadata_obj = MetaData(bind=engine)
+ engine = create_engine("mysql+pymysql://user:pass@host/db")
+ metadata_obj = MetaData(bind=engine)
Where above, any statement that derives from a :class:`_schema.Table` which
in turn derives from that :class:`_schema.MetaData` will implicitly make use of
The correct way to invoke statements is via
the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`::
- with engine.connect() as conn:
- result = conn.execute(stmt)
+ with engine.connect() as conn:
+ result = conn.execute(stmt)
When using the ORM, a similar facility is available via the :class:`.Session`::
- result = session.execute(stmt)
+ result = session.execute(stmt)
.. seealso::
The :func:`_sa.create_engine` call accepts additional arguments either
directly via the ``connect_args`` keyword argument::
- e = create_engine("mysql+mysqldb://scott:tiger@localhost/test",
- connect_args={"encoding": "utf8"})
+ e = create_engine(
+ "mysql+mysqldb://scott:tiger@localhost/test", connect_args={"encoding": "utf8"}
+ )
Or for basic string and integer arguments, they can usually be specified
in the query string of the URL::
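
    # a sketch mirroring the ``connect_args`` example above; the names
    # accepted in the query string depend on the DBAPI in use
    e = create_engine("mysql+mysqldb://scott:tiger@localhost/test?encoding=utf8")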
statement executions::
- import time
-
- from sqlalchemy import event
-
-
- def reconnecting_engine(engine, num_retries, retry_interval):
- def _run_with_retries(fn, context, cursor_obj, statement, *arg, **kw):
- for retry in range(num_retries + 1):
- try:
- fn(cursor_obj, statement, context=context, *arg)
- except engine.dialect.dbapi.Error as raw_dbapi_err:
- connection = context.root_connection
- if engine.dialect.is_disconnect(
- raw_dbapi_err, connection, cursor_obj
- ):
- if retry > num_retries:
- raise
- engine.logger.error(
- "disconnection error, retrying operation",
- exc_info=True,
- )
- connection.invalidate()
-
- # use SQLAlchemy 2.0 API if available
- if hasattr(connection, "rollback"):
- connection.rollback()
- else:
- trans = connection.get_transaction()
- if trans:
- trans.rollback()
-
- time.sleep(retry_interval)
- context.cursor = cursor_obj = connection.connection.cursor()
- else:
- raise
- else:
- return True
-
- e = engine.execution_options(isolation_level="AUTOCOMMIT")
-
- @event.listens_for(e, "do_execute_no_params")
- def do_execute_no_params(cursor_obj, statement, context):
- return _run_with_retries(
- context.dialect.do_execute_no_params, context, cursor_obj, statement
- )
-
- @event.listens_for(e, "do_execute")
- def do_execute(cursor_obj, statement, parameters, context):
- return _run_with_retries(
- context.dialect.do_execute, context, cursor_obj, statement, parameters
- )
-
- return e
+ import time
+
+ from sqlalchemy import event
+
+
+ def reconnecting_engine(engine, num_retries, retry_interval):
+ def _run_with_retries(fn, context, cursor_obj, statement, *arg, **kw):
+ for retry in range(num_retries + 1):
+ try:
+ fn(cursor_obj, statement, context=context, *arg)
+ except engine.dialect.dbapi.Error as raw_dbapi_err:
+ connection = context.root_connection
+ if engine.dialect.is_disconnect(raw_dbapi_err, connection, cursor_obj):
+ if retry > num_retries:
+ raise
+ engine.logger.error(
+ "disconnection error, retrying operation",
+ exc_info=True,
+ )
+ connection.invalidate()
+
+ # use SQLAlchemy 2.0 API if available
+ if hasattr(connection, "rollback"):
+ connection.rollback()
+ else:
+ trans = connection.get_transaction()
+ if trans:
+ trans.rollback()
+
+ time.sleep(retry_interval)
+ context.cursor = cursor_obj = connection.connection.cursor()
+ else:
+ raise
+ else:
+ return True
+
+ e = engine.execution_options(isolation_level="AUTOCOMMIT")
+
+ @event.listens_for(e, "do_execute_no_params")
+ def do_execute_no_params(cursor_obj, statement, context):
+ return _run_with_retries(
+ context.dialect.do_execute_no_params, context, cursor_obj, statement
+ )
+
+ @event.listens_for(e, "do_execute")
+ def do_execute(cursor_obj, statement, parameters, context):
+ return _run_with_retries(
+ context.dialect.do_execute, context, cursor_obj, statement, parameters
+ )
+
+ return e
Given the above recipe, a reconnection mid-transaction may be demonstrated
using the following proof of concept script. Once run, it will emit a
time.sleep(5)
e = reconnecting_engine(
- create_engine(
- "mysql+mysqldb://scott:tiger@localhost/test", echo_pool=True
- ),
+ create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo_pool=True),
num_retries=5,
retry_interval=2,
)
from sqlalchemy import create_engine
from sqlalchemy.pool import QueuePool
- engine = create_engine('mysql+mysqldb://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False))
+ engine = create_engine(
+ "mysql+mysqldb://scott:tiger@localhost/myisam_database",
+ pool=QueuePool(reset_on_return=False),
+ )
I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause
a COMMIT as any connection is returned to the pool::
- engine = create_engine('mssql+pyodbc://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit'))
-
+ engine = create_engine(
+ "mssql+pyodbc://scott:tiger@mydsn", pool=QueuePool(reset_on_return="commit")
+ )
I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working!
----------------------------------------------------------------------------------------------------------------------------------------------------------
from sqlalchemy import create_mock_engine
+
def dump(sql, *multiparams, **params):
print(sql.compile(dialect=engine.dialect))
- engine = create_mock_engine('postgresql+psycopg2://', dump)
+
+
+ engine = create_mock_engine("postgresql+psycopg2://", dump)
metadata_obj.create_all(engine, checkfirst=False)
The `Alembic <https://alembic.sqlalchemy.org>`_ tool also supports
class SomeClass(Base):
__table__ = some_table_with_no_pk
__mapper_args__ = {
- 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar]
+ "primary_key": [some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar]
}
Better yet is when using fully declared table metadata, use the ``primary_key=True``
flag on those columns; a minimal sketch::
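
    class SomeClass(Base):
        __tablename__ = "some_table_with_no_pk"

        # the candidate key columns are marked directly (names and types
        # are illustrative, following the example above)
        uid = Column(String(40), primary_key=True)
        bar = Column(String(40), primary_key=True)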
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(A):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ a_id = Column(Integer, ForeignKey("a.id"))
As of SQLAlchemy version 0.9.5, the above condition is detected, and will
warn that the ``id`` column of ``A`` and ``B`` is being combined under
A mapping which resolves this is as follows::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(A):
- __tablename__ = 'b'
+ __tablename__ = "b"
- b_id = Column('id', Integer, primary_key=True)
- a_id = Column(Integer, ForeignKey('a.id'))
+ b_id = Column("id", Integer, primary_key=True)
+ a_id = Column(Integer, ForeignKey("a.id"))
Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite
the fact that ``B.a_id`` is where ``A.id`` is related. We could combine
them together using :func:`.column_property`::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
+
class B(A):
- __tablename__ = 'b'
+ __tablename__ = "b"
# probably not what you want, but this is a demonstration
id = column_property(Column(Integer, primary_key=True), A.id)
- a_id = Column(Integer, ForeignKey('a.id'))
-
-
+ a_id = Column(Integer, ForeignKey("a.id"))
I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys.
------------------------------------------------------------------------------------------------------------------------------------------------------------------
class MyClass(Base):
# ....
- foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar"))
+ foo = relationship(
+ "Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")
+ )
That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`_orm.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string::
class MyClass(Base):
# ....
- foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)")
+ foo = relationship(
+ "Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)"
+ )
Or if the objects you need are already available, skip the strings::
class MyClass(Base):
# ....
- foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar))
+ foo = relationship(
+ Dest, primaryjoin=and_(MyClass.id == Dest.foo_id, MyClass.foo == Dest.bar)
+ )
The same idea applies to all the other arguments, such as ``foreign_keys``::
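
    # a sketch: strings inside of a list are not evaluated ...
    foo = relationship(Dest, foreign_keys=["Dest.foo_id", "Dest.bar_id"])

    # ... so pass a single string, or the column objects themselves
    foo = relationship(Dest, foreign_keys="[Dest.foo_id, Dest.bar_id]")
    foo = relationship(Dest, foreign_keys=[Dest.foo_id, Dest.bar_id])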
.. sourcecode:: python+sql
- >>> user = session.scalars(select(User).options(subqueryload(User.addresses)).limit(1)).first()
+ >>> user = session.scalars(
+ ... select(User).options(subqueryload(User.addresses)).limit(1)
+ ... ).first()
{opensql}-- the "main" query
SELECT users.id AS users_id
FROM users
means that you should :meth:`_sql.Select.order_by` on a unique column on the table.
The primary key is a good choice for this::
- session.scalars(select(User).options(subqueryload(User.addresses)).order_by(User.id).limit(1)).first()
+ session.scalars(
+ select(User).options(subqueryload(User.addresses)).order_by(User.id).limit(1)
+ ).first()
Note that the :func:`_orm.joinedload` eager loader strategy does not suffer from
the same problem because only one query is ever issued, so the load query
logger = logging.getLogger("myapp.sqltime")
logger.setLevel(logging.DEBUG)
+
@event.listens_for(Engine, "before_cursor_execute")
- def before_cursor_execute(conn, cursor, statement,
- parameters, context, executemany):
- conn.info.setdefault('query_start_time', []).append(time.time())
+ def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
+ conn.info.setdefault("query_start_time", []).append(time.time())
logger.debug("Start Query: %s", statement)
+
@event.listens_for(Engine, "after_cursor_execute")
- def after_cursor_execute(conn, cursor, statement,
- parameters, context, executemany):
- total = time.time() - conn.info['query_start_time'].pop(-1)
+ def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
+ total = time.time() - conn.info["query_start_time"].pop(-1)
logger.debug("Query Complete!")
logger.debug("Total Time: %f", total)
import pstats
import contextlib
+
@contextlib.contextmanager
def profiled():
pr = cProfile.Profile()
pr.enable()
yield
pr.disable()
s = io.StringIO()
- ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
+ ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
ps.print_stats()
# uncomment this to see who's calling what
# ps.print_callers()
To profile a section of code::
with profiled():
- session.scalars(select(FooClass).where(FooClass.somevalue==8)).all()
+ session.scalars(select(FooClass).where(FooClass.somevalue == 8)).all()
The output of profiling can be used to give an idea where time is
being spent. A section of profiling output looks like this::
from sqlalchemy import TypeDecorator
import time
+
class Foo(TypeDecorator):
impl = String
def process_result_value(self, value, thing):
# intentionally add slowness for illustration purposes
- time.sleep(.001)
+ time.sleep(0.001)
return value
the profiling output of this intentionally slow operation can be seen like this::
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
- Base = declarative_base(create_engine('sqlite://'))
+ Base = declarative_base(create_engine("sqlite://"))
+
class Foo(Base):
- __tablename__ = 'foo'
+ __tablename__ = "foo"
id = Column(Integer, primary_key=True)
+
Base.metadata.create_all()
session = sessionmaker()()
# continue using session without rolling back
session.commit()
-
The usage of the :class:`.Session` should fit within a structure similar to this::
try:
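    # ... work with the session here; a sketch of the canonical pattern ...
    session.commit()
except:
    session.rollback()
    raise
finally:
    session.close()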
Given a block such as::
- sess = Session() # begins a logical transaction
- try:
- sess.flush()
+ sess = Session() # begins a logical transaction
+ try:
+ sess.flush()
- sess.commit()
- except:
- sess.rollback()
+ sess.commit()
+ except:
+ sess.rollback()
Above, when a :class:`.Session` is first created, assuming "autocommit mode"
isn't used, a logical transaction is established within the :class:`.Session`.
for example use the ``User`` mapping described at :ref:`ormtutorial_toplevel`,
and we had a SQL query like the following::
- q = session.query(User).outerjoin(User.addresses).filter(User.name == 'jack')
+ q = session.query(User).outerjoin(User.addresses).filter(User.name == "jack")
Above, the sample data used in the tutorial has two rows in the ``addresses``
table for the ``users`` row with the name ``'jack'``, primary key value 5.
are **deduplicated**. This does not occur if we instead request individual
columns back::
- >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(User.name == 'jack').all()
+ >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(
+ ... User.name == "jack"
+ ... ).all()
[(5, 'jack'), (5, 'jack')]
There are two main reasons the :class:`_query.Query` will deduplicate:
print("ITER!")
return iter([1, 2, 3, 4, 5])
+
list(Iterates())
output::
o = session.scalars(select(SomeClass).limit(1)).first()
o.foo_id = 7
- Session.expire(o, ['foo']) # object must be persistent for this
+ Session.expire(o, ["foo"]) # object must be persistent for this
foo_7 = session.get(Foo, 7)
Session.flush() # emits INSERT
# expire this because we already set .foo to None
- Session.expire(o, ['foo'])
+ Session.expire(o, ["foo"])
assert new_obj.foo is foo_7 # now it loads
-
.. topic:: Attribute loading for non-persistent objects
One variant on the "pending" behavior above is if we use the flag
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B", backref="a")
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = Column(Integer, primary_key=True)
- a_id = Column(ForeignKey('a.id'))
- c_id = Column(ForeignKey('c.id'))
+ a_id = Column(ForeignKey("a.id"))
+ c_id = Column(ForeignKey("c.id"))
c = relationship("C", backref="bs")
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = Column(Integer, primary_key=True)
if we don't use it explicitly)::
>>> from sqlalchemy import table, column, select
- >>> t = table('my_table', column('x'))
+ >>> t = table("my_table", column("x"))
>>> statement = select(t)
>>> print(str(statement))
SELECT my_table.x
as::
>>> from sqlalchemy import column
- >>> print(column('x') == 'some value')
+ >>> print(column("x") == "some value")
x = :x_1
Stringifying for Specific Databases
use a PostgreSQL dialect::
from sqlalchemy.dialects import postgresql
+
print(statement.compile(dialect=postgresql.dialect()))
Note that any dialect can be assembled using :func:`_sa.create_engine` itself
with a dummy URL, then referring to the engine's ``dialect`` attribute; a sketch::
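
    from sqlalchemy import create_engine

    # no connection is made, though the psycopg2 DBAPI must be installed
    engine = create_engine("postgresql+psycopg2://")
    print(statement.compile(dialect=engine.dialect))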
from sqlalchemy.sql import table, column, select
- t = table('t', column('x'))
+ t = table("t", column("x"))
s = select(t).where(t.c.x == 5)
Base = declarative_base()
+
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(UUID)
+
stmt = select(A).where(A.data == uuid.uuid4())
Given the above model and statement which will compare a column to a single
their positional order for the statement as compiled::
import re
+
e = create_engine("sqlite+pysqlite://")
# will use qmark style, i.e. ? for param
# params in positional order
params = (repr(compiled.params[name]) for name in compiled.positiontup)
- print(re.sub(r'\?', lambda m: next(params), str(compiled)))
+ print(re.sub(r"\?", lambda m: next(params), str(compiled)))
The above snippet prints::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import BindParameter
+
@compiles(BindParameter)
def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw):
if not use_my_literal_recipe:
# render the value directly
return repr(element.value)
+
e = create_engine("postgresql+psycopg2://")
print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True}))
from sqlalchemy import TypeDecorator
+
class UUIDStringify(TypeDecorator):
impl = UUID
or locally within the statement using :func:`_sql.type_coerce`, such as ::
from sqlalchemy import type_coerce
+
stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4())
print(stmt.compile(e, compile_kwargs={"literal_binds": True}))
>>> e = create_engine("sqlite+pysqlite://")
>>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True})
>>> params = (repr(compiled.params[name]) for name in compiled.positiontup)
- >>> print(re.sub(r'\?', lambda m: next(params), str(compiled)))
+ >>> print(re.sub(r"\?", lambda m: next(params), str(compiled)))
SELECT a.id, a.data
FROM a
WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa'))
The :meth:`.Operators.op` method allows one to create a custom database operator
otherwise not known by SQLAlchemy::
- >>> print(column('q').op('->')(column('p')))
+ >>> print(column("q").op("->")(column("p")))
q -> p
However, when using it on the right side of a compound expression, it doesn't
generate parenthesis as we expect::
- >>> print((column('q1') + column('q2')).op('->')(column('p')))
+ >>> print((column("q1") + column("q2")).op("->")(column("p")))
q1 + q2 -> p
Where above, we probably want ``(q1 + q2) -> p``.
number, where 100 is the maximum value, and the highest number used by any
SQLAlchemy operator is currently 15::
- >>> print((column('q1') + column('q2')).op('->', precedence=100)(column('p')))
+ >>> print((column("q1") + column("q2")).op("->", precedence=100)(column("p")))
(q1 + q2) -> p
We can also usually force parenthesization around a binary expression (e.g.
an expression that has left/right operands and an operator) using the
:meth:`_expression.ColumnElement.self_group` method::
- >>> print((column('q1') + column('q2')).self_group().op('->')(column('p')))
+ >>> print((column("q1") + column("q2")).self_group().op("->")(column("p")))
(q1 + q2) -> p
Why are the parentheses rules like this?
operator is known to be associative, so that parenthesis are generated
minimally. Otherwise, an expression like::
- column('a') & column('b') & column('c') & column('d')
+ column("a") & column("b") & column("c") & column("d")
would produce::
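
    a AND b AND c AND d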
other cases, it leads to things that are more likely to confuse databases or at
the very least readability, such as::
- column('q', ARRAY(Integer, dimensions=2))[5][6]
+ column("q", ARRAY(Integer, dimensions=2))[5][6]
would produce::
e.g. the highest? Then this expression makes more parenthesis, but is
otherwise OK, that is, these two are equivalent::
- >>> print((column('q') - column('y')).op('+', precedence=100)(column('z')))
+ >>> print((column("q") - column("y")).op("+", precedence=100)(column("z")))
(q - y) + z
- >>> print((column('q') - column('y')).op('+')(column('z')))
+ >>> print((column("q") - column("y")).op("+")(column("z")))
q - y + z
but these two are not::
- >>> print(column('q') - column('y').op('+', precedence=100)(column('z')))
+ >>> print(column("q") - column("y").op("+", precedence=100)(column("z")))
q - y + z
- >>> print(column('q') - column('y').op('+')(column('z')))
+ >>> print(column("q") - column("y").op("+")(column("z")))
q - (y + z)
For now, it's not clear that as long as we are doing parenthesization based on
import numpy
+
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(Integer)
+
# .. later
session.add(A(data=numpy.int64(10)))
session.commit()
-
In the latter case, the issue is due to the ``numpy.int64`` datatype overriding
the ``__eq__()`` method and enforcing that the return type of an expression is
``numpy.True`` or ``numpy.False``, which breaks SQLAlchemy's expression
>>> import numpy
>>> from sqlalchemy import column, Integer
- >>> print(column('x', Integer) == numpy.int64(10)) # works
+ >>> print(column("x", Integer) == numpy.int64(10)) # works
x = :x_1
- >>> print(numpy.int64(10) == column('x', Integer)) # breaks
+ >>> print(numpy.int64(10) == column("x", Integer)) # breaks
False
These errors are both solved in the same way, which is that special numpy
session.add(A(data=int(data)))
- result = session.execute(
- select(A.data).where(int(data) == A.data)
- )
+ result = session.execute(select(A.data).where(int(data) == A.data))
session.commit()
SQL expression for WHERE/HAVING role expected, got True
-------------------------------------------------------
-See :ref:`numpy_int64`.
\ No newline at end of file
+See :ref:`numpy_int64`.
.. sourcecode:: python+sql
>>> import sqlalchemy
- >>> sqlalchemy.__version__ # doctest: +SKIP
+ >>> sqlalchemy.__version__ # doctest: +SKIP
2.0.0
Next Steps
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import DeclarativeBase, relationship
+
class Base(DeclarativeBase):
pass
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import DeclarativeBase, relationship
+
class Base(DeclarativeBase):
pass
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import DeclarativeBase, relationship
+
class Base(DeclarativeBase):
pass
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
-
Declarative vs. Imperative Forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
configuration looks like::
registry.map_imperatively(
- Parent, parent_table, properties={
- "children": relationship(
- "Child", back_populates="parent"
- )
- }
+ Parent,
+ parent_table,
+ properties={"children": relationship("Child", back_populates="parent")},
)
registry.map_imperatively(
- Child, child_table, properties={
- "parent": relationship("Parent", back_populates="children")
- }
+ Child,
+ child_table,
+ properties={"parent": relationship("Parent", back_populates="children")},
)
Additionally, the default collection style for non-annotated mappings is
id: Mapped[int] = mapped_column(primary_key=True)
children: Mapped[list["Child"]] = relationship(backref="parent")
+
class Child(Base):
__tablename__ = "child"
parent_id = mapped_column(ForeignKey("parent.id"))
parent = relationship("Parent", back_populates="child")
-
.. _relationships_many_to_many:
Many To Many
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
# note for a Core table, we use the sqlalchemy.Column construct,
# not sqlalchemy.orm.mapped_column
association_table = Table(
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
association_table = Table(
"association",
Base.metadata,
secondary=association_table, back_populates="children"
)
-
When using the :paramref:`_orm.relationship.backref` parameter instead of
:paramref:`_orm.relationship.back_populates`, the backref will automatically
use the same :paramref:`_orm.relationship.secondary` argument for the
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
association_table = Table(
"association",
Base.metadata,
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Association(Base):
__tablename__ = "association"
left_id: Mapped[int] = mapped_column(ForeignKey("left.id"), primary_key=True)
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Association(Base):
__tablename__ = "association"
left_id: Mapped[int] = mapped_column(ForeignKey("left.id"), primary_key=True)
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Association(Base):
__tablename__ = "association"
# association between Association -> Parent
parent: Mapped["Parent"] = relationship(back_populates="child_associations")
+
class Parent(Base):
__tablename__ = "left"
id: Mapped[int] = mapped_column(primary_key=True)
# many-to-many relationship to Child, bypassing the `Association` class
- children: Mapped[list["Child"]] = relationship(secondary="association", back_populates="parents")
+ children: Mapped[list["Child"]] = relationship(
+ secondary="association", back_populates="parents"
+ )
# association between Parent -> Association -> Child
- child_associations: Mapped[list["Association"]] = relationship(back_populates="parent")
+ child_associations: Mapped[list["Association"]] = relationship(
+ back_populates="parent"
+ )
+
class Child(Base):
__tablename__ = "right"
id: Mapped[int] = mapped_column(primary_key=True)
# many-to-many relationship to Parent, bypassing the `Association` class
- parents: Mapped[list["Parent"]] = relationship(secondary="association", back_populates="children")
+ parents: Mapped[list["Parent"]] = relationship(
+ secondary="association", back_populates="children"
+ )
# association between Child -> Association -> Parent
- parent_associations: Mapped[list["Association"]] = relationship(back_populates="child")
+ parent_associations: Mapped[list["Association"]] = relationship(
+ back_populates="child"
+ )
When using this ORM model to make changes, changes made to
``Parent.children`` will not be coordinated with changes made to
)
# association between Parent -> Association -> Child
- child_associations: Mapped[list["Association"]] = relationship(back_populates="parent")
+ child_associations: Mapped[list["Association"]] = relationship(
+ back_populates="parent"
+ )
+
class Child(Base):
__tablename__ = "right"
)
# association between Child -> Association -> Parent
- parent_associations: Mapped[list["Association"]] = relationship(back_populates="child")
+ parent_associations: Mapped[list["Association"]] = relationship(
+ back_populates="child"
+ )
The above mapping will not write any changes to ``Parent.children`` or
``Child.parents`` to the database, preventing conflicting writes. However, reads
the :func:`_orm.relationship` construct::
registry.map_imperatively(
- Parent, parent_table, properties={
- "children": relationship("Child", back_populates="parent")
- }
+ Parent,
+ parent_table,
+ properties={"children": relationship("Child", back_populates="parent")},
)
registry.map_imperatively(
- Child, child_table, properties={
- "parent": relationship("Parent", back_populates="children")
- }
+ Child,
+ child_table,
+ properties={"parent": relationship("Parent", back_populates="children")},
)
These string names are resolved into classes in the mapper resolution stage,
primaryjoin="myapp.mymodel.Parent.id == myapp.mymodel.Child.parent_id",
)
-
The qualified path can be any partial path that removes ambiguity between
the names. For example, to disambiguate between
``myapp.model1.Child`` and ``myapp.model2.Child``, an unambiguous tail of the
module path such as ``model1.Child`` may be used; a sketch::
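
    class Parent(Base):
        # ...
        children: Mapped[list["Child"]] = relationship("model1.Child")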
id: Mapped[int] = mapped_column(primary_key=True)
children: Mapped[list["Child"]] = relationship(
- "Child",
- secondary=lambda: association_table
+ "Child", secondary=lambda: association_table
)
Or to illustrate locating the same :class:`.Table` object by name,
id: Mapped[int] = mapped_column(primary_key=True)
children: Mapped[list["Child"]] = relationship(secondary="association")
-
.. warning:: When passed as a string,
:paramref:`_orm.relationship.secondary` argument is interpreted using Python's
``eval()`` function, even though it's typically the name of a table.
>>> user1 = sess1.scalars(select(User).filter_by(id=1)).first()
>>> address1 = user1.addresses[0]
- >>> sess1.close() # user1, address1 no longer associated with sess1
+ >>> sess1.close() # user1, address1 no longer associated with sess1
>>> user1.addresses.remove(address1) # address1 no longer associated with user1
>>> sess2 = Session()
- >>> sess2.add(user1) # ... but it still gets added to the new session,
+ >>> sess2.add(user1) # ... but it still gets added to the new session,
>>> address1 in sess2 # because it's still "pending" for flush
True
addresses = relationship("Address", cascade="all, delete-orphan")
+
# ...
del user.addresses[1]
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Parent(Base):
__tablename__ = "parent"
# use a list
children: Mapped[list["Child"]] = relationship()
+
class Child(Base):
__tablename__ = "child"
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Parent(Base):
__tablename__ = "parent"
# use a set
children: Mapped[set["Child"]] = relationship()
+
class Child(Base):
__tablename__ = "child"
# non-annotated mapping
+
class Parent(Base):
__tablename__ = "parent"
children = relationship("Child", collection_class=set)
+
class Child(Base):
__tablename__ = "child"
cascade="all, delete-orphan",
)
+
class Note(Base):
__tablename__ = "note"
class ListLike:
def __init__(self):
self.data = []
+
def append(self, item):
self.data.append(item)
+
def remove(self, item):
self.data.remove(item)
+
def extend(self, items):
self.data.extend(items)
+
def __iter__(self):
return iter(self.data)
+
def foo(self):
- return 'foo'
+ return "foo"
``append``, ``remove``, and ``extend`` are known list-like methods, and will
be instrumented automatically. ``__iter__`` is not a mutator method and won't
be instrumented; a method like ``foo`` is likewise left uninstrumented.
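
Such a class may be supplied directly as the collection; a sketch, assuming
the ``Parent``/``Child`` mapping used elsewhere in this document::

    children = relationship("Child", collection_class=ListLike)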
def __init__(self):
self.data = set()
+
def append(self, item):
self.data.add(item)
+
def remove(self, item):
self.data.remove(item)
+
def __iter__(self):
return iter(self.data)
from sqlalchemy.orm.collections import collection
+
class SetLike:
__emulates__ = set
from sqlalchemy.util import OrderedDict
from sqlalchemy.orm.collections import MappedCollection
+
class NodeMap(OrderedDict, MappedCollection):
"""Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained."""
class MyAwesomeList(some.great.library.AwesomeList):
pass
+
# ... relationship(..., collection_class=MyAwesomeList)
The ORM uses this approach for built-ins, quietly substituting a trivial
import dataclasses
+
@dataclasses.dataclass
class Point:
x: int
from sqlalchemy.orm import DeclarativeBase, Mapped
from sqlalchemy.orm import composite, mapped_column
+
class Base(DeclarativeBase):
pass
from sqlalchemy import Integer
from sqlalchemy.orm import mapped_column, composite
+
class Vertex(Base):
__tablename__ = "vertices"
from sqlalchemy.orm import mapped_column, composite, Mapped
+
class Vertex(Base):
__tablename__ = "vertices"
},
)
-
.. _composite_legacy_no_dataclass:
Using Legacy Non-Dataclasses
return f"Point(x={self.x!r}, y={self.y!r})"
def __eq__(self, other):
- return (
- isinstance(other, Point)
- and other.x == self.x
- and other.y == self.y
- )
+ return isinstance(other, Point) and other.x == self.x and other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
from sqlalchemy.orm import mapped_column
from sqlalchemy.sql import and_
+
@dataclasses.dataclass
class Point:
x: int
]
)
+
class Base(DeclarativeBase):
pass
+
class Vertex(Base):
__tablename__ = "vertices"
id: Mapped[int] = mapped_column(primary_key=True)
start: Mapped[Point] = composite(
- mapped_column("x1"),
- mapped_column("y1"),
- comparator_factory=PointComparator
+ mapped_column("x1"), mapped_column("y1"), comparator_factory=PointComparator
)
end: Mapped[Point] = composite(
- mapped_column("x2"),
- mapped_column("y2"),
- comparator_factory=PointComparator
+ mapped_column("x2"), mapped_column("y2"), comparator_factory=PointComparator
)
Since ``Point`` is a dataclass, we may make use of
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
@dataclasses.dataclass
class Point:
x: int
def __composite_values__(self):
"""generate a row from a Vertex"""
- return (
- dataclasses.astuple(self.start) + dataclasses.astuple(self.end)
- )
+ return dataclasses.astuple(self.start) + dataclasses.astuple(self.end)
+
class Base(DeclarativeBase):
pass
session.add(hv)
session.commit()
- stmt = select(HasVertex).where(
- HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))
- )
+ stmt = select(HasVertex).where(HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4)))
hv = session.scalars(stmt).first()
print(hv.vertex.start)
Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request)
+
@on_request_end
def remove_session(req):
Session.remove()
class Base(MappedAsDataclass, DeclarativeBase):
"""subclasses will be converted to dataclasses"""
+
class User(Base):
__tablename__ = "user_account"
class Base(DeclarativeBase):
pass
+
class User(MappedAsDataclass, Base):
"""User class will be converted to a dataclass"""
reg = registry()
+
@reg.mapped_as_dataclass
class User:
__tablename__ = "user_account"
class Base(DeclarativeBase):
pass
+
class User(MappedAsDataclass, Base, repr=False, unsafe_hash=True):
"""User class will be converted to a dataclass"""
reg = registry()
+
@reg.mapped_as_dataclass
class User:
__tablename__ = "user_account"
name: Mapped[str]
fullname: Mapped[str] = mapped_column(default=None)
+
# 'fullname' is optional keyword argument
- u1 = User('name')
+ u1 = User("name")
Column Defaults
~~~~~~~~~~~~~~~
reg = registry()
+
@reg.mapped_as_dataclass
class User:
__tablename__ = "user_account"
id: Mapped[int] = mapped_column(init=False, primary_key=True)
created_at: Mapped[datetime] = mapped_column(
- insert_default=func.utc_timestamp(),
- default=None
+ insert_default=func.utc_timestamp(), default=None
)
With the above mapping, an ``INSERT`` for a new ``User`` object where no
.. sourcecode:: pycon+sql
>>> with Session(e) as session:
- ... session.add(User())
+ ... session.add(User())
{sql}... session.commit()
BEGIN (implicit)
INSERT INTO user_account (created_at) VALUES (utc_timestamp())
reg = registry()
+
@reg.mapped_as_dataclass
class User:
__tablename__ = "user_account"
id: Mapped[intpk]
+
# typing error: Argument missing for parameter "id"
u1 = User()
reg = registry()
+
@reg.mapped_as_dataclass
class User:
__tablename__ = "user_account"
reg = registry()
+
@reg.mapped_as_dataclass
class Parent:
__tablename__ = "parent"
id: Mapped[int] = mapped_column(primary_key=True)
- children: Mapped[List["Child"]] = relationship(default_factory=list, back_populates='parent')
+ children: Mapped[List["Child"]] = relationship(
+ default_factory=list, back_populates="parent"
+ )
@reg.mapped_as_dataclass
user_id: int = field(
init=False, metadata={"sa": mapped_column(ForeignKey("user.id"))}
)
- email_address: str = field(
- default=None, metadata={"sa": mapped_column(String(50))}
- )
+ email_address: str = field(default=None, metadata={"sa": mapped_column(String(50))})
.. _orm_declarative_dataclasses_mixin:
user_id: int = field(
init=False, metadata={"sa": lambda: mapped_column(ForeignKey("user.id"))}
)
- email_address: str = field(
- default=None, metadata={"sa": mapped_column(String(50))}
- )
+ email_address: str = field(default=None, metadata={"sa": mapped_column(String(50))})
@mapper_registry.mapped
mapper_registry = registry()
+
@dataclass
class User:
id: int = field(init=False)
nickname: str = None
addresses: List[Address] = field(default_factory=list)
+
@dataclass
class Address:
id: int = field(init=False)
user_id: int = field(init=False)
email_address: str = None
+
metadata_obj = MetaData()
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(12)),
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(12)),
)
address = Table(
- 'address',
+ "address",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String(50)),
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String(50)),
)
- mapper_registry.map_imperatively(User, user, properties={
- 'addresses': relationship(Address, backref='user', order_by=address.c.id),
- })
+ mapper_registry.map_imperatively(
+ User,
+ user,
+ properties={
+ "addresses": relationship(Address, backref="user", order_by=address.c.id),
+ },
+ )
mapper_registry.map_imperatively(Address, address)
-
.. _orm_declarative_attrs_imperative_table:
Applying ORM mappings to an existing attrs class
}
}
+
@mapper_registry.mapped
@define(slots=False)
class Address:
user_id: int
email_address: Optional[str]
-
.. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on
a mapped class, cannot be used with SQLAlchemy mappings without fully
implementing alternative
mapper_registry = registry()
+
@define(slots=False)
class User:
id: int
nickname: str
addresses: List[Address]
+
@define(slots=False)
class Address:
id: int
user_id: int
email_address: Optional[str]
+
metadata_obj = MetaData()
user = Table(
- 'user',
+ "user",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(12)),
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(12)),
)
address = Table(
- 'address',
+ "address",
metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String(50)),
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String(50)),
)
- mapper_registry.map_imperatively(User, user, properties={
- 'addresses': relationship(Address, backref='user', order_by=address.c.id),
- })
+ mapper_registry.map_imperatively(
+ User,
+ user,
+ properties={
+ "addresses": relationship(Address, backref="user", order_by=address.c.id),
+ },
+ )
mapper_registry.map_imperatively(Address, address)
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))
email_address: Mapped[str]
- address_statistics: Mapped[Optional[str]] = mapped_column(
- Text, deferred=True
- )
+ address_statistics: Mapped[Optional[str]] = mapped_column(Text, deferred=True)
user: Mapped["User"] = relationship(back_populates="addresses")
from sqlalchemy.orm import deferred
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
Column("lastname", String(50)),
)
- fullname = column_property(
- __table__.c.firstname + " " + __table__.c.lastname
- )
+ fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname)
addresses = relationship("Address", back_populates="user")
key for the class, independently of schema-level primary key constraints::
class GroupUsers(Base):
- __tablename__ = 'group_users'
+ __tablename__ = "group_users"
user_id = mapped_column(String(40))
group_id = mapped_column(String(40))
- __mapper_args__ = {
- "primary_key": [user_id, group_id]
- }
+ __mapper_args__ = {"primary_key": [user_id, group_id]}
.. seealso::
polymorphic_identity="employee",
)
-
.. seealso::
:ref:`single_inheritance` - background on the ORM single table inheritance
def __mapper_args__(cls):
return {
"exclude_properties": [
- column.key for column in cls.__table__.c if
- column.info.get("exclude", False)
+ column.key
+ for column in cls.__table__.c
+ if column.info.get("exclude", False)
]
}
+
class Base(DeclarativeBase):
pass
+
class SomeClass(ExcludeColsWFlag, Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id = mapped_column(Integer, primary_key=True)
data = mapped_column(String)
not_needed = mapped_column(String, info={"exclude": True})
-
Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__``
hook that will scan for :class:`.Column` objects that include the key/value
``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then
class MyClass(Base):
@classmethod
def __declare_last__(cls):
- ""
+ """ """
# do something with mappings
``__declare_first__()``
class MyClass(Base):
@classmethod
def __declare_first__(cls):
- ""
+ """ """
# do something before mappings are configured
.. versionadded:: 0.9.3
id = mapped_column(Integer, primary_key=True)
-
.. seealso::
:ref:`declarative_abstract`
__abstract__ = True
def some_helpful_method(self):
- """"""
+ """ """
@declared_attr
def __mapper_args__(cls):
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class CommonMixin:
"""define a series of common elements that may be applied to mapped
classes using this class as a mixin class."""
id: Mapped[int] = mapped_column(primary_key=True)
+
class HasLogRecord:
"""mark classes that have a many-to-one relationship to the
``LogRecord`` class."""
def log_record(self) -> Mapped["LogRecord"]:
return relationship("LogRecord")
+
class LogRecord(CommonMixin, Base):
log_info: Mapped[str]
+
class MyModel(CommonMixin, HasLogRecord, Base):
name: Mapped[str]
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
"""define a series of common elements that may be applied to mapped
classes using this class as a base class."""
id: Mapped[int] = mapped_column(primary_key=True)
+
class HasLogRecord:
"""mark classes that have a many-to-one relationship to the
``LogRecord`` class."""
def log_record(self) -> Mapped["LogRecord"]:
return relationship("LogRecord")
+
class LogRecord(Base):
log_info: Mapped[str]
+
class MyModel(HasLogRecord, Base):
name: Mapped[str]
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base:
"""define a series of common elements that may be applied to mapped
classes using this class as a base class."""
id = mapped_column(Integer, primary_key=True)
+
Base = declarative_base(cls=Base)
+
class HasLogRecord:
"""mark classes that have a many-to-one relationship to the
``LogRecord`` class."""
def log_record(self):
return relationship("LogRecord")
+
class LogRecord(Base):
log_info = mapped_column(String)
+
class MyModel(HasLogRecord, Base):
name = mapped_column(String)
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class RefTargetMixin:
target_id: Mapped[int] = mapped_column(ForeignKey("target.id"))
def target(cls) -> Mapped["Target"]:
return relationship("Target")
+
class Foo(RefTargetMixin, Base):
__tablename__ = "foo"
id: Mapped[int] = mapped_column(primary_key=True)
__tablename__ = "target"
id: Mapped[int] = mapped_column(primary_key=True)
+
class RefTargetMixin:
target_id: Mapped[int] = mapped_column(ForeignKey("target.id"))
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
+
class SomethingMixin:
x: Mapped[int]
y: Mapped[int]
def x_plus_y(cls) -> Mapped[int]:
return column_property(cls.x + cls.y)
+
class Something(SomethingMixin, Base):
__tablename__ = "something"
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
class Engineer(Person):
- id: Mapped[int] = mapped_column(ForeignKey('person.id'), primary_key=True)
+ id: Mapped[int] = mapped_column(ForeignKey("person.id"), primary_key=True)
primary_language: Mapped[str]
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
return cls.__name__.lower()
- id: Mapped[int] = mapped_column(ForeignKey('person.id'), primary_key=True)
+ id: Mapped[int] = mapped_column(ForeignKey("person.id"), primary_key=True)
primary_language: Mapped[str]
__mapper_args__ = {"polymorphic_identity": "manager"}
-
.. _mixin_inheritance_columns:
Using :func:`_orm.declared_attr` to generate table-specific inheriting columns
class HasId:
id: Mapped[int] = mapped_column(primary_key=True)
+
class Person(HasId, Base):
__tablename__ = "person"
discriminator: Mapped[str]
__mapper_args__ = {"polymorphic_on": "discriminator"}
+
# this mapping will fail, as there's no primary key
class Engineer(Person):
__tablename__ = "engineer"
@declared_attr
def __table_args__(cls):
- return (
- Index(f"test_idx_{cls.__tablename__}", "a", "b"),
- )
+ return (Index(f"test_idx_{cls.__tablename__}", "a", "b"),)
class MyModel(MyMixin, Base):
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
+
class User(Base):
__tablename__ = "user"
addresses: Mapped[List["Address"]] = relationship(back_populates="user")
+
class Address(Base):
__tablename__ = "address"
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
-
All of the examples that follow illustrate a class inheriting from the above
``Base``. The decorator style introduced at :ref:`orm_declarative_decorator`
is fully supported with all the following examples as well, as are legacy
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import Mapped, mapped_column, registry
+
class Base(DeclarativeBase):
- registry = registry(type_annotation_map={
- int: BIGINT,
- datetime.datetime: TIMESTAMP(timezone=True),
- str: String().with_variant(NVARCHAR, "mssql"),
- })
+ registry = registry(
+ type_annotation_map={
+ int: BIGINT,
+ datetime.datetime: TIMESTAMP(timezone=True),
+ str: String().with_variant(NVARCHAR, "mssql"),
+ }
+ )
class SomeClass(Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
id: Mapped[int] = mapped_column(primary_key=True)
date: Mapped[datetime.datetime]
num_12_4 = Annotated[Decimal, 12]
num_6_2 = Annotated[Decimal, 6]
+
class Base(DeclarativeBase):
- registry = registry(type_annotation_map={
- str_30: String(30),
- str_50: String(50),
- num_12_4: Numeric(12, 4),
- num_6_2: Numeric(6, 2)
- })
+ registry = registry(
+ type_annotation_map={
+ str_30: String(30),
+ str_50: String(50),
+ num_12_4: Numeric(12, 4),
+ num_6_2: Numeric(6, 2),
+ }
+ )
The Python type passed to the ``Annotated`` container, in the above example the
``str`` and ``Decimal`` types, is important only for the benefit of typing
tools; we can use these augmented types directly in our mapping, where they
will be matched to the more specific type constructions, as in the following example::
- class SomeClass(Base):
- __tablename__ = 'some_table'
+ class SomeClass(Base):
+ __tablename__ = "some_table"
- short_name: Mapped[str_30] = mapped_column(primary_key=True)
- long_name: Mapped[str_50]
- num_value: Mapped[num_12_4]
- short_num_value: Mapped[num_6_2]
+ short_name: Mapped[str_30] = mapped_column(primary_key=True)
+ long_name: Mapped[str_50]
+ num_value: Mapped[num_12_4]
+ short_num_value: Mapped[num_6_2]
A CREATE TABLE for the above mapping will illustrate the different variants
of ``VARCHAR`` and ``NUMERIC`` we've configured, and looks like::
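
    -- illustrative DDL (a sketch derived from the mapping above; exact
    -- output can vary by backend)
    CREATE TABLE some_table (
        short_name VARCHAR(30) NOT NULL,
        long_name VARCHAR(50) NOT NULL,
        num_value NUMERIC(12, 4) NOT NULL,
        short_num_value NUMERIC(6, 2) NOT NULL,
        PRIMARY KEY (short_name)
    )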
mapped_column(nullable=False),
]
+
class Base(DeclarativeBase):
pass
+
class SomeClass(Base):
# ...
mapped_column(nullable=False, server_default=func.CURRENT_TIMESTAMP()),
]
+
class Base(DeclarativeBase):
pass
+
class Parent(Base):
- __tablename__ = 'parent'
+ __tablename__ = "parent"
id: Mapped[intpk]
+
class SomeClass(Base):
- __tablename__ = 'some_table'
+ __tablename__ = "some_table"
# add ForeignKey to mapped_column(Integer, primary_key=True)
- id: Mapped[intpk] = mapped_column(ForeignKey('parent.id'))
+ id: Mapped[intpk] = mapped_column(ForeignKey("parent.id"))
# change server default from CURRENT_TIMESTAMP to UTC_TIMESTAMP
created_at: Mapped[timestamp] = mapped_column(server_default=func.UTC_TIMESTAMP())
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
class MyClass(Base):
__tablename__ = "sometable"
__table_args__ = {"schema": "some_schema"}
metadata_obj = MetaData(schema="some_schema")
+
class Base(DeclarativeBase):
metadata = metadata_obj
additional SQL statements::
class User(Base):
- __tablename__ = "user"
+ __tablename__ = "user"
- id: Mapped[int] = mapped_column(primary_key=True)
- important_identifier: Mapped[str] = mapped_column(active_history=True)
+ id: Mapped[int] = mapped_column(primary_key=True)
+ important_identifier: Mapped[str] = mapped_column(active_history=True)
See the docstring for :func:`_orm.mapped_column` for a list of supported
parameters.
given to the columns themselves::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
- id: Mapped[int] = mapped_column('user_id', primary_key=True)
- name: Mapped[str] = mapped_column('user_name')
+ id: Mapped[int] = mapped_column("user_id", primary_key=True)
+ name: Mapped[str] = mapped_column("user_name")
Where above ``User.id`` resolves to a column named ``user_id``
and ``User.name`` resolves to a column named ``user_name``. We
may query against these attributes as usual, and will see the SQL names generated::
>>> from sqlalchemy import select
- >>> print(select(User.id, User.name).where(User.name == 'x'))
+ >>> print(select(User.id, User.name).where(User.name == "x"))
SELECT "user".user_id, "user".user_name
FROM "user"
WHERE "user".user_name = :user_name_1
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
# construct a Table directly. The Base.metadata collection is
# usually a good choice for MetaData but any MetaData
# collection may be used.
Column("user_name", String),
)
+
class User(Base):
__table__ = user_table
from sqlalchemy.orm import column_property
from sqlalchemy.orm import Mapped
+
class User(Base):
__table__ = user_table
Column("bio", Text),
)
+
class User(Base):
__table__ = user_table
bio = deferred(user_table.c.bio)
-
.. seealso::
:ref:`orm_queryguide_column_deferral` - full description of deferred column loading
collection when inspecting the history of the attribute. This may incur
additional SQL statements::
- from sqlalchemy.orm import deferred
+ from sqlalchemy.orm import deferred
- user_table = Table(
- "user",
- Base.metadata,
- Column("id", Integer, primary_key=True),
- Column("important_identifier", String)
- )
+ user_table = Table(
+ "user",
+ Base.metadata,
+ Column("id", Integer, primary_key=True),
+ Column("important_identifier", String),
+ )
- class User(Base):
- __table__ = user_table
- important_identifier = column_property(user_table.c.important_identifier, active_history=True)
+ class User(Base):
+ __table__ = user_table
+ important_identifier = column_property(
+ user_table.c.important_identifier, active_history=True
+ )
.. seealso::
from sqlalchemy import Table
from sqlalchemy.orm import DeclarativeBase
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/my_existing_database"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database")
+
class Base(DeclarativeBase):
pass
+
class MyClass(Base):
__table__ = Table(
"mytable",
from sqlalchemy import Table
from sqlalchemy.orm import DeclarativeBase
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/my_existing_database"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database")
+
class Base(DeclarativeBase):
pass
+
Base.metadata.reflect(engine)
+
class MyClass(Base):
- __table__ = Base.metadata.tables['mytable']
+ __table__ = Base.metadata.tables["mytable"]
One caveat to the approach of using ``__table__`` is that the mapped classes cannot
be declared until the tables have been reflected, which requires the database
from sqlalchemy.ext.declarative import DeferredReflection
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
class Reflected(DeferredReflection):
__abstract__ = True
complete until we do so, given an :class:`_engine.Engine`::
- engine = create_engine(
- "postgresql+psycopg2://user:pass@hostname/my_existing_database"
- )
+ engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database")
Reflected.prepare(engine)
The purpose of the ``Reflected`` class is to define the scope at which
from sqlalchemy import event
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
@event.listens_for(Base.metadata, "column_reflect")
def column_reflect(inspector, table, column_info):
# set column.key = "attr_<lower_case_name>"
- column_info['key'] = "attr_%s" % column_info['name'].lower()
+ column_info["key"] = "attr_%s" % column_info["name"].lower()
With the above event, the reflection of :class:`_schema.Column` objects will be intercepted
with our event that adds a new ".key" element, such as in a mapping as below::
class MyClass(Base):
- __table__ = Table("some_table", Base.metadata,
- autoload_with=some_engine)
+ __table__ = Table("some_table", Base.metadata, autoload_with=some_engine)
The approach also works with both the :class:`.DeferredReflection` base class
as well as with the :ref:`automap_toplevel` extension. For automap
metadata,
Column("user_id", String(40), nullable=False),
Column("group_id", String(40), nullable=False),
- UniqueConstraint("user_id", "group_id")
+ UniqueConstraint("user_id", "group_id"),
)
class GroupUsers(Base):
__table__ = group_users
- __mapper_args__ = {
- "primary_key": [group_users.c.user_id, group_users.c.group_id]
- }
+ __mapper_args__ = {"primary_key": [group_users.c.user_id, group_users.c.group_id]}
Above, the ``group_users`` table is an association table of some kind
with string columns ``user_id`` and ``group_id``, but no primary key is set up;
class User(Base):
__table__ = user_table
- __mapper_args__ = {
- 'include_properties': ['user_id', 'user_name']
- }
+ __mapper_args__ = {"include_properties": ["user_id", "user_name"]}
In the above example, the ``User`` class will map to the ``user_table`` table, only
including the ``user_id`` and ``user_name`` columns - the rest are not referenced.
class Address(Base):
__table__ = address_table
- __mapper_args__ = {
- 'exclude_properties': ["street", "city", "state", "zip"]
- }
+ __mapper_args__ = {"exclude_properties": ["street", "city", "state", "zip"]}
will map the ``Address`` class to the ``address_table`` table, including
all columns present except ``street``, ``city``, ``state``, and ``zip``.
class User(Base):
__table__ = user_table
__mapper_args__ = {
- 'include_properties': [user_table.c.user_id, user_table.c.user_name]
+ "include_properties": [user_table.c.user_id, user_table.c.user_name]
}
When columns are not included in a mapping, these columns will not be
from sqlalchemy import Column, ForeignKey, Integer, String, Table
from sqlalchemy.orm import DeclarativeBase, relationship
+
class Base(DeclarativeBase):
pass
+
class User(Base):
__tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import DeclarativeBase, relationship
+
class Base(DeclarativeBase):
pass
>>> user = User("log")
>>> for kw in (Keyword("new_from_blammo"), Keyword("its_big")):
... user.keywords.append(kw)
- ...
>>> print(user.keywords)
[Keyword('new_from_blammo'), Keyword('its_big')]
from sqlalchemy.orm import DeclarativeBase, relationship
from sqlalchemy.orm.collections import attribute_mapped_collection
+
class Base(DeclarativeBase):
pass
from sqlalchemy.orm import DeclarativeBase, relationship
from sqlalchemy.orm.collections import attribute_mapped_collection
+
class Base(DeclarativeBase):
pass
usage of the assignment operator, also appropriately handled by the
association proxy, to apply a dictionary value to the collection at once::
- >>> user = User('log')
- >>> user.keywords = {
- ... 'sk1':'kw1',
- ... 'sk2':'kw2'
- ... }
+ >>> user = User("log")
+ >>> user.keywords = {"sk1": "kw1", "sk2": "kw2"}
>>> print(user.keywords)
{'sk1': 'kw1', 'sk2': 'kw2'}
- >>> user.keywords['sk3'] = 'kw3'
- >>> del user.keywords['sk2']
+ >>> user.keywords["sk3"] = "kw3"
+ >>> del user.keywords["sk2"]
>>> print(user.keywords)
{'sk1': 'kw1', 'sk3': 'kw3'}
>>> # illustrate un-proxied usage
- ... print(user.user_keyword_associations['sk3'].kw)
+ ... print(user.user_keyword_associations["sk3"].kw)
<__main__.Keyword object at 0x12ceb90>
One caveat with our example above is that because ``Keyword`` objects are created
from sqlalchemy.orm import DeclarativeBase, relationship
from sqlalchemy.orm.collections import attribute_mapped_collection
+
class Base(DeclarativeBase):
pass
)
# column-targeted association proxy
- special_keys = association_proxy(
- "user_keyword_associations", "special_key"
- )
+ special_keys = association_proxy("user_keyword_associations", "special_key")
class UserKeywordAssociation(Base):
id = Column(Integer, primary_key=True)
keyword = Column("keyword", String(64))
-
The SQL generated takes the form of a correlated subquery against
the EXISTS SQL operator so that it can be used in a WHERE clause without
the need for additional modifications to the enclosing query. If the
:inherited-members:
.. autoclass:: AssociationProxyExtensionType
- :members:
\ No newline at end of file
+ :members:
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
from sqlalchemy.orm import DeclarativeBase, relationship, selectinload
+
class Base(DeclarativeBase):
pass
asyncio.run(go())
-
The above example prints something along the lines of::
New DBAPI connection: <AdaptedConnection <asyncpg.connection.Connection ...>>
:meth:`_asyncio.async_scoped_session.remove` method::
async def some_function(some_async_session, some_object):
- # use the AsyncSession directly
- some_async_session.add(some_object)
+ # use the AsyncSession directly
+ some_async_session.add(some_object)
- # use the AsyncSession via the context-local proxy
- await AsyncScopedSession.commit()
+ # use the AsyncSession via the context-local proxy
+ await AsyncScopedSession.commit()
- # "remove" the current proxied AsyncSession for the local context
- await AsyncScopedSession.remove()
+ # "remove" the current proxied AsyncSession for the local context
+ await AsyncScopedSession.remove()
.. versionadded:: 1.4.19
my_simple_cache = {}
+
def lookup(session, id_argument):
if "my_key" not in my_simple_cache:
query = session.query(Model).filter(Model.id == bindparam("id"))
parameterized_query = bakery.bake(create_model_query)
if include_frobnizzle:
+
def include_frobnizzle_in_query(query):
return query.filter(Model.frobnizzle == True)
bakery = baked.bakery()
baked_query = bakery(lambda session: session.query(User))
- baked_query += lambda q: q.filter(
- User.name.in_(bindparam("username", expanding=True))
- )
+ baked_query += lambda q: q.filter(User.name.in_(bindparam("username", expanding=True)))
result = baked_query.with_session(session).params(username=["ed", "fred"]).all()
Mixin and Custom Base Classes
=============================
-See :ref:`orm_mixins_toplevel` for this section.
\ No newline at end of file
+See :ref:`orm_mixins_toplevel` for this section.
.. autoclass:: HybridExtensionType
- :members:
\ No newline at end of file
+ :members:
# a select() construct makes use of SQL expressions derived from the
# User class itself
- select_stmt = (
- select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
- )
+ select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
Above, the steps that the Mypy extension can take include:
)
name: Mapped[Optional[str]] = Mapped._special_method(Column(String))
- def __init__(
- self, id: Optional[int] = ..., name: Optional[str] = ...
- ) -> None:
+ def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None:
...
print(f"Username: {some_user.name}")
- select_stmt = (
- select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
- )
-
+ select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s"))
The key steps which have been taken above include:
id = Column(Integer, primary_key=True)
name = Column(String)
- addresses: Mapped[List["Address"]] = relationship(
- "Address", back_populates="user"
- )
+ addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user")
class Address(Base):
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
+
class Employee(Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
from sqlalchemy.orm import relationship
+
class Company(Base):
__tablename__ = "company"
id: Mapped[int] = mapped_column(primary_key=True)
from datetime import datetime
+
class Employee(Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
from sqlalchemy.ext.declarative import ConcreteBase
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
class Employee(ConcreteBase, Base):
__tablename__ = "employee"
id = mapped_column(Integer, primary_key=True)
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
class Employee(Base):
__abstract__ = True
from sqlalchemy.ext.declarative import AbstractConcreteBase
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
"concrete": True,
}
+
Base.registry.configure()
Above, the :meth:`_orm.registry.configure` method is invoked, which will
class and any attributes that are locally declared upon it, such as the
``Employee.name``::
- >>> stmt = select(Employee).where(Employee.name == 'n1')
+ >>> stmt = select(Employee).where(Employee.name == "n1")
>>> print(stmt)
SELECT pjoin.id, pjoin.name, pjoin.type, pjoin.manager_data, pjoin.engineer_info
FROM (
"concrete": True,
}
-
Above, we use :func:`.polymorphic_union` in the same manner as before, except
that we omit the ``employee`` table.
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Customer(Base):
- __tablename__ = 'customer'
+ __tablename__ = "customer"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String)
billing_address = relationship("Address")
shipping_address = relationship("Address")
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = mapped_column(Integer, primary_key=True)
street = mapped_column(String)
city = mapped_column(String)
the appropriate form is as follows::
class Customer(Base):
- __tablename__ = 'customer'
+ __tablename__ = "customer"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String)
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String)
- boston_addresses = relationship("Address",
- primaryjoin="and_(User.id==Address.user_id, "
- "Address.city=='Boston')")
+ boston_addresses = relationship(
+ "Address",
+ primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')",
+ )
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = mapped_column(Integer, primary_key=True)
- user_id = mapped_column(Integer, ForeignKey('user.id'))
+ user_id = mapped_column(Integer, ForeignKey("user.id"))
street = mapped_column(String)
city = mapped_column(String)
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = mapped_column(Integer, primary_key=True)
ip_address = mapped_column(INET)
content = mapped_column(String(50))
# relationship() using explicit foreign_keys, remote_side
- parent_host = relationship("HostEntry",
- primaryjoin=ip_address == cast(content, INET),
- foreign_keys=content,
- remote_side=ip_address
- )
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=ip_address == cast(content, INET),
+ foreign_keys=content,
+ remote_side=ip_address,
+ )
The above relationship will produce a join like::
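
    -- a sketch of the generated join; the alias name "host_entry_1"
    -- is illustrative
    SELECT host_entry.id, host_entry.ip_address, host_entry.content
    FROM host_entry
    JOIN host_entry AS host_entry_1
    ON host_entry_1.ip_address = CAST(host_entry.content AS INET)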
from sqlalchemy.orm import foreign, remote
+
class HostEntry(Base):
- __tablename__ = 'host_entry'
+ __tablename__ = "host_entry"
id = mapped_column(Integer, primary_key=True)
ip_address = mapped_column(INET)
# relationship() using explicit foreign() and remote() annotations
# in lieu of separate arguments
- parent_host = relationship("HostEntry",
- primaryjoin=remote(ip_address) == \
- cast(foreign(content), INET),
- )
-
+ parent_host = relationship(
+ "HostEntry",
+ primaryjoin=remote(ip_address) == cast(foreign(content), INET),
+ )
.. _relationship_custom_operator:
a :func:`_orm.relationship`::
class IPA(Base):
- __tablename__ = 'ip_address'
+ __tablename__ = "ip_address"
id = mapped_column(Integer, primary_key=True)
v4address = mapped_column(INET)
- network = relationship("Network",
- primaryjoin="IPA.v4address.bool_op('<<')"
- "(foreign(Network.v4representation))",
- viewonly=True
- )
+ network = relationship(
+ "Network",
+ primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))",
+ viewonly=True,
+ )
+
+
class Network(Base):
- __tablename__ = 'network'
+ __tablename__ = "network"
id = mapped_column(Integer, primary_key=True)
v4representation = mapped_column(CIDR)
from sqlalchemy import Column, Integer, func
from sqlalchemy.orm import relationship, foreign
+
class Polygon(Base):
__tablename__ = "polygon"
id = mapped_column(Integer, primary_key=True)
viewonly=True,
)
+
class Point(Base):
__tablename__ = "point"
id = mapped_column(Integer, primary_key=True)
``Article.magazine`` and ``Article.writer``::
class Magazine(Base):
- __tablename__ = 'magazine'
+ __tablename__ = "magazine"
id = mapped_column(Integer, primary_key=True)
class Article(Base):
- __tablename__ = 'article'
+ __tablename__ = "article"
article_id = mapped_column(Integer)
- magazine_id = mapped_column(ForeignKey('magazine.id'))
+ magazine_id = mapped_column(ForeignKey("magazine.id"))
writer_id = mapped_column()
magazine = relationship("Magazine")
writer = relationship("Writer")
__table_args__ = (
- PrimaryKeyConstraint('article_id', 'magazine_id'),
+ PrimaryKeyConstraint("article_id", "magazine_id"),
ForeignKeyConstraint(
- ['writer_id', 'magazine_id'],
- ['writer.id', 'writer.magazine_id']
+ ["writer_id", "magazine_id"], ["writer.id", "writer.magazine_id"]
),
)
class Writer(Base):
- __tablename__ = 'writer'
+ __tablename__ = "writer"
id = mapped_column(Integer, primary_key=True)
- magazine_id = mapped_column(ForeignKey('magazine.id'), primary_key=True)
+ magazine_id = mapped_column(ForeignKey("magazine.id"), primary_key=True)
magazine = relationship("Magazine")
When the above mapping is configured, we will see this warning emitted::
class Article(Base):
# ...
- writer = relationship("Writer", foreign_keys='Article.writer_id')
+ writer = relationship("Writer", foreign_keys="Article.writer_id")
However, this has the effect of ``Article.writer`` not taking
``Article.magazine_id`` into account when querying against ``Writer``:
writer = relationship(
"Writer",
primaryjoin="and_(Writer.id == foreign(Article.writer_id), "
- "Writer.magazine_id == Article.magazine_id)")
+ "Writer.magazine_id == Article.magazine_id)",
+ )
.. versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used
as the synchronization target from more than one relationship
we'll be dealing with collections so we keep things configured as "one to many"::
class Element(Base):
- __tablename__ = 'element'
+ __tablename__ = "element"
path = mapped_column(String, primary_key=True)
- descendants = relationship('Element',
- primaryjoin=
- remote(foreign(path)).like(
- path.concat('/%')),
- viewonly=True,
- order_by=path)
+ descendants = relationship(
+ "Element",
+ primaryjoin=remote(foreign(path)).like(path.concat("/%")),
+ viewonly=True,
+ order_by=path,
+ )
Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``,
we seek for a load of ``Element.descendants`` to look like::
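
    -- a sketch of the emitted SQL, given the mapping above
    SELECT element.path AS element_path
    FROM element
    WHERE element.path LIKE ('/foo/bar2' || '/%')
    ORDER BY element.path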
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
- node_to_node = Table("node_to_node", Base.metadata,
+
+ node_to_node = Table(
+ "node_to_node",
+ Base.metadata,
Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True),
- Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True)
+ Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True),
)
+
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = mapped_column(Integer, primary_key=True)
label = mapped_column(String)
- right_nodes = relationship("Node",
- secondary=node_to_node,
- primaryjoin=id==node_to_node.c.left_node_id,
- secondaryjoin=id==node_to_node.c.right_node_id,
- backref="left_nodes"
+ right_nodes = relationship(
+ "Node",
+ secondary=node_to_node,
+ primaryjoin=id == node_to_node.c.left_node_id,
+ secondaryjoin=id == node_to_node.c.right_node_id,
+ backref="left_nodes",
)
Where above, SQLAlchemy can't know automatically which columns should connect
use the string name of the table as it is present in the :class:`_schema.MetaData`::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = mapped_column(Integer, primary_key=True)
label = mapped_column(String)
- right_nodes = relationship("Node",
- secondary="node_to_node",
- primaryjoin="Node.id==node_to_node.c.left_node_id",
- secondaryjoin="Node.id==node_to_node.c.right_node_id",
- backref="left_nodes"
+ right_nodes = relationship(
+ "Node",
+ secondary="node_to_node",
+ primaryjoin="Node.id==node_to_node.c.left_node_id",
+ secondaryjoin="Node.id==node_to_node.c.right_node_id",
+ backref="left_nodes",
)
.. warning:: When passed as a Python-evaluable string, the
metadata_obj = MetaData()
mapper_registry = registry()
- node_to_node = Table("node_to_node", metadata_obj,
+ node_to_node = Table(
+ "node_to_node",
+ metadata_obj,
Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True),
- Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True)
+ Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True),
)
- node = Table("node", metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('label', String)
+ node = Table(
+ "node",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("label", String),
)
+
+
class Node:
pass
- mapper_registry.map_imperatively(Node, node, properties={
- 'right_nodes':relationship(Node,
- secondary=node_to_node,
- primaryjoin=node.c.id==node_to_node.c.left_node_id,
- secondaryjoin=node.c.id==node_to_node.c.right_node_id,
- backref="left_nodes"
- )})
+ mapper_registry.map_imperatively(
+ Node,
+ node,
+ properties={
+ "right_nodes": relationship(
+ Node,
+ secondary=node_to_node,
+ primaryjoin=node.c.id == node_to_node.c.left_node_id,
+ secondaryjoin=node.c.id == node_to_node.c.right_node_id,
+ backref="left_nodes",
+ )
+ },
+ )
Note that in both examples, the :paramref:`_orm.relationship.backref`
keyword specifies a ``left_nodes`` backref - when
join condition (requires version 0.9.2 at least to function as is)::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = mapped_column(Integer, primary_key=True)
- b_id = mapped_column(ForeignKey('b.id'))
+ b_id = mapped_column(ForeignKey("b.id"))
-    d = relationship("D",
-                secondary="join(B, D, B.d_id == D.id)."
-                    "join(C, C.d_id == D.id)",
-                primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)",
-                secondaryjoin="D.id == B.d_id",
-                uselist=False,
-                viewonly=True
-                )
+
+    d = relationship(
+        "D",
+        secondary="join(B, D, B.d_id == D.id)." "join(C, C.d_id == D.id)",
+        primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)",
+        secondaryjoin="D.id == B.d_id",
+        uselist=False,
+        viewonly=True,
+    )
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = mapped_column(Integer, primary_key=True)
- d_id = mapped_column(ForeignKey('d.id'))
+ d_id = mapped_column(ForeignKey("d.id"))
+
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = mapped_column(Integer, primary_key=True)
- a_id = mapped_column(ForeignKey('a.id'))
- d_id = mapped_column(ForeignKey('d.id'))
+ a_id = mapped_column(ForeignKey("a.id"))
+ d_id = mapped_column(ForeignKey("d.id"))
+
class D(Base):
- __tablename__ = 'd'
+ __tablename__ = "d"
id = mapped_column(Integer, primary_key=True)
the rows in both ``A`` and ``B`` simultaneously::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = mapped_column(Integer, primary_key=True)
- b_id = mapped_column(ForeignKey('b.id'))
+ b_id = mapped_column(ForeignKey("b.id"))
+
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = mapped_column(Integer, primary_key=True)
+
class C(Base):
- __tablename__ = 'c'
+ __tablename__ = "c"
id = mapped_column(Integer, primary_key=True)
- a_id = mapped_column(ForeignKey('a.id'))
+ a_id = mapped_column(ForeignKey("a.id"))
some_c_value = mapped_column(String)
+
class D(Base):
- __tablename__ = 'd'
+ __tablename__ = "d"
id = mapped_column(Integer, primary_key=True)
- c_id = mapped_column(ForeignKey('c.id'))
- b_id = mapped_column(ForeignKey('b.id'))
+ c_id = mapped_column(ForeignKey("c.id"))
+ b_id = mapped_column(ForeignKey("b.id"))
some_d_value = mapped_column(String)
+
# 1. set up the join() as a variable, so we can refer
# to it in the mapping multiple times.
j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id)
ten items for each collection::
class A(Base):
- __tablename__ = 'a'
+ __tablename__ = "a"
id = mapped_column(Integer, primary_key=True)
class B(Base):
- __tablename__ = 'b'
+ __tablename__ = "b"
id = mapped_column(Integer, primary_key=True)
a_id = mapped_column(ForeignKey("a.id"))
+
partition = select(
- B,
- func.row_number().over(
- order_by=B.id, partition_by=B.a_id
- ).label('index')
+ B, func.row_number().over(order_by=B.id, partition_by=B.a_id).label("index")
).alias()
partitioned_b = aliased(B, partition)
A.partitioned_bs = relationship(
- partitioned_b,
- primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10)
+ partitioned_b, primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10)
)
We can use the above ``partitioned_bs`` relationship with most of the loader
.. sourcecode:: python
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
@property
.. seealso::
- :ref:`mapper_hybrids`
\ No newline at end of file
+ :ref:`mapper_hybrids`
:orphan:
-Moved! :doc:`/orm/loading_relationships`
\ No newline at end of file
+Moved! :doc:`/orm/loading_relationships`
from sqlalchemy.orm import validates
+
class EmailAddress(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = mapped_column(Integer, primary_key=True)
email = mapped_column(String)
- @validates('email')
+ @validates("email")
def validate_email(self, key, address):
- if '@' not in address:
+ if "@" not in address:
raise ValueError("failed simple email validation")
return address
-
Validators also receive collection append events, when items are added to a
collection::
from sqlalchemy.orm import validates
+
class User(Base):
# ...
addresses = relationship("Address")
- @validates('addresses')
+ @validates("addresses")
def validate_address(self, key, address):
- if '@' not in address.email:
+ if "@" not in address.email:
raise ValueError("failed simplified email validation")
return address
-
The validation function by default does not get emitted for collection
remove events, as the typical expectation is that a value being discarded
doesn't require validation. However, :func:`.validates` supports reception
from sqlalchemy.orm import validates
+
class User(Base):
# ...
addresses = relationship("Address")
- @validates('addresses', include_removes=True)
+ @validates("addresses", include_removes=True)
def validate_address(self, key, address, is_remove):
if is_remove:
- raise ValueError(
- "not allowed to remove items from the collection")
+ raise ValueError("not allowed to remove items from the collection")
else:
- if '@' not in address.email:
+ if "@" not in address.email:
raise ValueError("failed simplified email validation")
return address
from sqlalchemy.orm import validates
+
class User(Base):
# ...
- addresses = relationship("Address", backref='user')
+ addresses = relationship("Address", backref="user")
- @validates('addresses', include_backrefs=False)
+ @validates("addresses", include_backrefs=False)
def validate_address(self, key, address):
- if '@' not in address:
+ if "@" not in address:
raise ValueError("failed simplified email validation")
return address
different name. Below we illustrate this using Python 2.6-style properties::
class EmailAddress(Base):
- __tablename__ = 'email_address'
+ __tablename__ = "email_address"
id = mapped_column(Integer, primary_key=True)
from sqlalchemy.ext.hybrid import hybrid_property
+
class EmailAddress(Base):
- __tablename__ = 'email_address'
+ __tablename__ = "email_address"
id = mapped_column(Integer, primary_key=True)
logic::
class EmailAddress(Base):
- __tablename__ = 'email_address'
+ __tablename__ = "email_address"
id = mapped_column(Integer, primary_key=True)
from sqlalchemy.orm import synonym
+
class MyClass(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
job_status = mapped_column(String(50))
``.status`` that will behave as one attribute, both at the expression
level::
- >>> print(MyClass.job_status == 'some_status')
+ >>> print(MyClass.job_status == "some_status")
my_table.job_status = :job_status_1
- >>> print(MyClass.status == 'some_status')
+ >>> print(MyClass.status == "some_status")
my_table.job_status = :job_status_1
and at the instance level::
- >>> m1 = MyClass(status='x')
+ >>> m1 = MyClass(status="x")
>>> m1.status, m1.job_status
('x', 'x')
- >>> m1.job_status = 'y'
+ >>> m1.job_status = "y"
>>> m1.status, m1.job_status
('y', 'y')
``status`` synonym with a ``@property``::
class MyClass(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
status = mapped_column(String(50))
from sqlalchemy.ext.declarative import synonym_for
+
class MyClass(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
status = mapped_column(String(50))
from sqlalchemy.ext.hybrid import hybrid_property
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
firstname = mapped_column(String(50))
lastname = mapped_column(String(50))
as well as usable within queries::
- some_user = session.scalars(select(User).where(User.fullname == "John Smith").limit(1)).first()
-
+ some_user = session.scalars(
+ select(User).where(User.fullname == "John Smith").limit(1)
+ ).first()
The string concatenation example is a simple one, where the Python expression
can be dual purposed at the instance and class level. Often, the SQL expression
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql import case
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
firstname = mapped_column(String(50))
lastname = mapped_column(String(50))
@fullname.expression
def fullname(cls):
- return case([
- (cls.firstname != None, cls.firstname + " " + cls.lastname),
- ], else_ = cls.lastname)
+ return case(
+ [
+ (cls.firstname != None, cls.firstname + " " + cls.lastname),
+ ],
+ else_=cls.lastname,
+ )
.. _mapper_column_property_sql_expressions:
from sqlalchemy.orm import column_property
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
firstname = mapped_column(String(50))
lastname = mapped_column(String(50))
from sqlalchemy.orm import DeclarativeBase
+
class Base(DeclarativeBase):
pass
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = mapped_column(Integer, primary_key=True)
- user_id = mapped_column(Integer, ForeignKey('user.id'))
+ user_id = mapped_column(Integer, ForeignKey("user.id"))
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
address_count = column_property(
- select(func.count(Address.id)).
- where(Address.user_id==id).
- correlate_except(Address).
- scalar_subquery()
+ select(func.count(Address.id))
+ .where(Address.user_id == id)
+ .correlate_except(Address)
+ .scalar_subquery()
)
In the above example, we define a :class:`_expression.ScalarSelect` construct like the following::
stmt = (
- select(func.count(Address.id)).
- where(Address.user_id==id).
- correlate_except(Address).
- scalar_subquery()
+ select(func.count(Address.id))
+ .where(Address.user_id == id)
+ .correlate_except(Address)
+ .scalar_subquery()
)
Above, we first use :func:`_sql.select` to create a :class:`_sql.Select`
from sqlalchemy import and_
+
class Author(Base):
# ...
book_count = column_property(
- select(func.count(books.c.id)
- ).where(
+ select(func.count(books.c.id))
+ .where(
and_(
- book_authors.c.author_id==authors.c.id,
- book_authors.c.book_id==books.c.id
+ book_authors.c.author_id == authors.c.id,
+ book_authors.c.book_id == books.c.id,
)
- ).scalar_subquery()
+ )
+ .scalar_subquery()
)
-
Adding column_property() to an existing Declarative mapped class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# only works if a declarative base class is in use
User.address_count = column_property(
- select(func.count(Address.id)).
- where(Address.user_id==User.id).
- scalar_subquery()
+ select(func.count(Address.id)).where(Address.user_id == User.id).scalar_subquery()
)
When using mapping styles that don't use Declarative base classes
reg = registry()
+
@reg.mapped
class User:
- __tablename__ = 'user'
+ __tablename__ = "user"
# ... additional mapping directives
# works for any kind of mapping
from sqlalchemy import inspect
+
inspect(User).add_property(
column_property(
- select(func.count(Address.id)).
- where(Address.user_id==User.id).
- scalar_subquery()
+ select(func.count(Address.id))
+ .where(Address.user_id == User.id)
+ .scalar_subquery()
)
)
class File(Base):
- __tablename__ = 'file'
+ __tablename__ = "file"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(64))
extension = mapped_column(String(8))
- filename = column_property(name + '.' + extension)
- path = column_property('C:/' + filename.expression)
+ filename = column_property(name + "." + extension)
+ path = column_property("C:/" + filename.expression)
When the ``File`` class is used in expressions normally, the attributes
assigned to ``filename`` and ``path`` are usable directly. The use of the
:attr:`.ColumnProperty.expression` attribute is only necessary when using
the :class:`.ColumnProperty` directly within the mapping definition::
- stmt = select(File.path).where(File.filename == 'foo.txt')
+ stmt = select(File.path).where(File.filename == "foo.txt")
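
As a rough sketch (not verbatim output), assuming a backend where the string
literals render as bound parameters, the statement above would render along
these lines::

    SELECT ? || file.name || ? || file.extension AS anon_1
    FROM file
    WHERE file.name || ? || file.extension = ?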
Using Column Deferral with ``column_property()``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
from sqlalchemy.orm import deferred
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
firstname: Mapped[str] = mapped_column()
from sqlalchemy.orm import object_session
from sqlalchemy import select, func
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
firstname = mapped_column(String(50))
lastname = mapped_column(String(50))
@property
def address_count(self):
- return object_session(self).\
- scalar(
- select(func.count(Address.id)).\
- where(Address.user_id==self.id)
- )
+ return object_session(self).scalar(
+ select(func.count(Address.id)).where(Address.user_id == self.id)
+ )
The plain descriptor approach is useful as a last resort, but is less performant
in the usual case than both the hybrid and column property approaches, in that
.. toctree::
:hidden:
- scalar_mapping
\ No newline at end of file
+ scalar_mapping
class Base(DeclarativeBase):
pass
+
# an example mapping using the base
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
mapper_registry = registry()
user_table = Table(
- 'user',
+ "user",
mapper_registry.metadata,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('fullname', String(50)),
- Column('nickname', String(12))
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
+ Column("fullname", String(50)),
+ Column("nickname", String(12)),
)
+
class User:
pass
- mapper_registry.map_imperatively(User, user_table)
+ mapper_registry.map_imperatively(User, user_table)
Information about mapped attributes, such as relationships to other classes, is provided
via the ``properties`` dictionary. The example below illustrates a second :class:`_schema.Table`
object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`::
- address = Table('address', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String(50))
- )
+ address = Table(
+ "address",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String(50)),
+ )
- mapper_registry.map_imperatively(User, user, properties={
- 'addresses' : relationship(Address, backref='user', order_by=address.c.id)
- })
+ mapper_registry.map_imperatively(
+ User,
+ user,
+ properties={
+ "addresses": relationship(Address, backref="user", order_by=address.c.id)
+ },
+ )
mapper_registry.map_imperatively(Address, address)
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
+
class Base(DeclarativeBase):
pass
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
An object of type ``User`` above will have a constructor which allows
``User`` objects to be created as::
- u1 = User(name='some name', fullname='some fullname')
+ u1 = User(name="some name", fullname="some fullname")
.. tip::
mapper_registry = registry()
user_table = Table(
- 'user',
+ "user",
mapper_registry.metadata,
- Column('id', Integer, primary_key=True),
- Column('name', String(50))
+ Column("id", Integer, primary_key=True),
+ Column("name", String(50)),
)
+
class User:
pass
+
mapper_registry.map_imperatively(User, user_table)
The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`,
>>> insp.attrs.nickname.value
'nickname'
- >>> u1.nickname = 'new nickname'
+ >>> u1.nickname = "new nickname"
>>> insp.attrs.nickname.history
History(added=['new nickname'], unchanged=(), deleted=['nickname'])
multiple tables, complete with its own composite primary key, which can be
mapped in the same way as a :class:`_schema.Table`::
- from sqlalchemy import Table, Column, Integer, \
- String, MetaData, join, ForeignKey
+ from sqlalchemy import Table, Column, Integer, String, MetaData, join, ForeignKey
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import column_property
metadata_obj = MetaData()
# define two Table objects
- user_table = Table('user', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('name', String),
- )
-
- address_table = Table('address', metadata_obj,
- Column('id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey('user.id')),
- Column('email_address', String)
- )
+ user_table = Table(
+ "user",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("name", String),
+ )
+
+ address_table = Table(
+ "address",
+ metadata_obj,
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("user.id")),
+ Column("email_address", String),
+ )
# define a join between them. This
# takes place across the user.id and address.user_id
# columns.
user_address_join = join(user_table, address_table)
+
class Base(DeclarativeBase):
metadata = metadata_obj
+
# map to it
class AddressUser(Base):
__table__ = user_address_join
from sqlalchemy import select, func
- subq = select(
- func.count(orders.c.id).label('order_count'),
- func.max(orders.c.price).label('highest_order'),
- orders.c.customer_id
- ).group_by(orders.c.customer_id).subquery()
+ subq = (
+ select(
+ func.count(orders.c.id).label("order_count"),
+ func.max(orders.c.price).label("highest_order"),
+ orders.c.customer_id,
+ )
+ .group_by(orders.c.customer_id)
+ .subquery()
+ )
-    customer_select = select(customers, subq).join_from(
-        customers, subq, customers.c.id == subq.c.customer_id
-    ).subquery()
+
+    customer_select = (
+        select(customers, subq)
+        .join_from(customers, subq, customers.c.id == subq.c.customer_id)
+        .subquery()
+    )
class Customer(Base):
__table__ = customer_select
value = mapped_column(Integer)
+
someobject = session.get(SomeClass, 5)
# set 'value' attribute to a SQL expression adding one
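someobject.value = SomeClass.value + 1  # a sketch of the assignment described above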
session = Session()
# execute a string statement
- result = session.execute("select * from table where id=:id", {'id':7})
+ result = session.execute("select * from table where id=:id", {"id": 7})
# execute a SQL expression construct
- result = session.execute(select(mytable).where(mytable.c.id==7))
+ result = session.execute(select(mytable).where(mytable.c.id == 7))
The current :class:`~sqlalchemy.engine.Connection` held by the
:class:`~sqlalchemy.orm.session.Session` is accessible using the
# need to specify mapper or class when executing
result = session.execute(
text("select * from table where id=:id"),
- {'id':7},
- bind_arguments={'mapper': MyMappedClass}
+ {"id": 7},
+ bind_arguments={"mapper": MyMappedClass},
)
result = session.execute(
- select(mytable).where(mytable.c.id==7),
- bind_arguments={'mapper': MyMappedClass}
+ select(mytable).where(mytable.c.id == 7), bind_arguments={"mapper": MyMappedClass}
)
connection = session.connection(MyMappedClass)
"default" case; the attribute will be omitted from the INSERT statement::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
data = mapped_column(String(50), nullable=True)
+
obj = MyObject(id=1)
session.add(obj)
session.commit() # INSERT with the 'data' column omitted; the database
- # itself will persist this as the NULL value
+ # itself will persist this as the NULL value
Omitting a column from the INSERT means that the column will
have the NULL value set, *unless* the column has a default set up, in which
case the default value is persisted; this applies equally to server-side
defaults::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
data = mapped_column(String(50), nullable=True, server_default="default")
+
obj = MyObject(id=1)
session.add(obj)
session.commit() # INSERT with the 'data' column omitted; the database
- # itself will persist this as the value 'default'
+ # itself will persist this as the value 'default'
However, in the ORM, even if one assigns the Python value ``None`` explicitly
to the object, this is treated the **same** as though the value were never
assigned::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
data = mapped_column(String(50), nullable=True, server_default="default")
+
obj = MyObject(id=1, data=None)
session.add(obj)
session.commit() # INSERT with the 'data' column explicitly set to None;
- # the ORM still omits it from the statement and the
- # database will still persist this as the value 'default'
+ # the ORM still omits it from the statement and the
+ # database will still persist this as the value 'default'
The above operation will persist into the ``data`` column the
server default value of ``"default"`` and not SQL NULL, even though ``None``
obj = MyObject(id=1, data=null())
session.add(obj)
session.commit() # INSERT with the 'data' column explicitly set as null();
- # the ORM uses this directly, bypassing all client-
- # and server-side defaults, and the database will
- # persist this as the NULL value
+ # the ORM uses this directly, bypassing all client-
+ # and server-side defaults, and the database will
+ # persist this as the NULL value
The :obj:`_expression.null` SQL construct always translates into the SQL
NULL value being directly present in the target INSERT statement.
To instead have the ORM take the ``None``
value and pass it through, rather than omitting it as a "missing" value, the
type may be configured with :meth:`.TypeEngine.evaluates_none`::
class MyObject(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
data = mapped_column(
- String(50).evaluates_none(), # indicate that None should always be passed
- nullable=True, server_default="default")
+ String(50).evaluates_none(), # indicate that None should always be passed
+ nullable=True,
+ server_default="default",
+ )
+
obj = MyObject(id=1, data=None)
session.add(obj)
session.commit() # INSERT with the 'data' column explicitly set to None;
- # the ORM uses this directly, bypassing all client-
- # and server-side defaults, and the database will
- # persist this as the NULL value
+ # the ORM uses this directly, bypassing all client-
+ # and server-side defaults, and the database will
+ # persist this as the NULL value
.. topic:: Evaluating None
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
timestamp = mapped_column(DateTime(), server_default=func.now())
:paramref:`.orm.mapper.eager_defaults`::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
timestamp = mapped_column(DateTime(), server_default=func.now())
the :class:`.Sequence` construct::
class MyOracleModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, Sequence("my_sequence"), primary_key=True)
data = mapped_column(String(50))
SQL Server TIMESTAMP column as the primary key, which generates values automatically::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
- timestamp = mapped_column(TIMESTAMP(), server_default=FetchedValue(), primary_key=True)
+ timestamp = mapped_column(
+ TIMESTAMP(), server_default=FetchedValue(), primary_key=True
+ )
An INSERT for the above table on SQL Server looks like:
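
.. sourcecode:: sql

    -- illustrative statement; the OUTPUT clause returns the generated value
    INSERT INTO my_table OUTPUT inserted.timestamp DEFAULT VALUES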
pre-execute-supported default using the "NOW()" SQL function::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
timestamp = mapped_column(DateTime(), default=func.now(), primary_key=True)
from sqlalchemy import cast, Binary
+
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
timestamp = mapped_column(
- TIMESTAMP(),
- default=cast(func.now(), Binary),
- primary_key=True)
+ TIMESTAMP(), default=cast(func.now(), Binary), primary_key=True
+ )
Above, in addition to selecting the "NOW()" function, we additionally make
use of the :class:`.Binary` datatype in conjunction with :func:`.cast` so that
by passing this as the ``type_`` parameter::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
timestamp = mapped_column(
DateTime,
- default=func.datetime('now', 'localtime', type_=DateTime),
- primary_key=True)
+ default=func.datetime("now", "localtime", type_=DateTime),
+ primary_key=True,
+ )
The above mapping upon INSERT will look like:
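
.. sourcecode:: sql

    -- a sketch: the default is pre-executed, then its result is bound
    -- into the INSERT
    SELECT datetime(?, ?) AS datetime_1
    ('now', 'localtime')
    INSERT INTO my_table (timestamp) VALUES (?)
    ('2015-10-24 15:32:47',)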
to ensure that the fetch occurs::
class MyModel(Base):
- __tablename__ = 'my_table'
+ __tablename__ = "my_table"
id = mapped_column(Integer, primary_key=True)
- created = mapped_column(DateTime(), default=func.now(), server_default=FetchedValue())
- updated = mapped_column(DateTime(), onupdate=func.now(), server_default=FetchedValue(), server_onupdate=FetchedValue())
+ created = mapped_column(
+ DateTime(), default=func.now(), server_default=FetchedValue()
+ )
+ updated = mapped_column(
+ DateTime(),
+ onupdate=func.now(),
+ server_default=FetchedValue(),
+ server_onupdate=FetchedValue(),
+ )
__mapper_args__ = {"eager_defaults": True}
emit SQL on behalf of a particular kind of mapped class in order to locate
the appropriate source of database connectivity::
- engine1 = create_engine('postgresql+psycopg2://db1')
- engine2 = create_engine('postgresql+psycopg2://db2')
+ engine1 = create_engine("postgresql+psycopg2://db1")
+ engine2 = create_engine("postgresql+psycopg2://db2")
Session = sessionmaker()
# bind User operations to engine 1, Account operations to engine 2
- Session.configure(binds={User:engine1, Account:engine2})
+ Session.configure(binds={User: engine1, Account: engine2})
session = Session()
::
engines = {
- 'leader':create_engine("sqlite:///leader.db"),
- 'other':create_engine("sqlite:///other.db"),
- 'follower1':create_engine("sqlite:///follower1.db"),
- 'follower2':create_engine("sqlite:///follower2.db"),
+ "leader": create_engine("sqlite:///leader.db"),
+ "other": create_engine("sqlite:///other.db"),
+ "follower1": create_engine("sqlite:///follower1.db"),
+ "follower2": create_engine("sqlite:///follower2.db"),
}
from sqlalchemy.sql import Update, Delete
from sqlalchemy.orm import Session, sessionmaker
import random
+
class RoutingSession(Session):
def get_bind(self, mapper=None, clause=None):
if mapper and issubclass(mapper.class_, MyOtherClass):
- return engines['other']
+ return engines["other"]
elif self._flushing or isinstance(clause, (Update, Delete)):
- return engines['leader']
+ return engines["leader"]
else:
- return engines[
- random.choice(['follower1','follower2'])
- ]
+ return engines[random.choice(["follower1", "follower2"])]
The above :class:`.Session` class is plugged in using the ``class_``
argument to :class:`.sessionmaker`::
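
    # a minimal sketch: the RoutingSession class defined above is supplied
    # via sessionmaker's ``class_`` argument, so Session() instances use
    # the routing logic
    Session = sessionmaker(class_=RoutingSession)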
method, making direct use of :class:`_dml.Insert` and :class:`_dml.Update`
constructs. See the document at :doc:`queryguide/dml` for documentation,
including :ref:`orm_queryguide_legacy_bulk` which illustrates migration
- from the older methods to the new methods.
\ No newline at end of file
+ from the older methods to the new methods.
>>>
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class User(Base):
... __tablename__ = "user_account"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... fullname: Mapped[Optional[str]]
... books: Mapped[List["Book"]] = relationship(back_populates="owner")
+ ...
... def __repr__(self) -> str:
... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
- ...
>>> class Book(Base):
... __tablename__ = "book"
... id: Mapped[int] = mapped_column(primary_key=True)
... summary: Mapped[str] = mapped_column(Text)
... cover_photo: Mapped[bytes] = mapped_column(LargeBinary)
... owner: Mapped["User"] = relationship(back_populates="books")
+ ...
... def __repr__(self) -> str:
... return f"Book(id={self.id!r}, title={self.title!r})"
- ...
- ...
>>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True)
>>> Base.metadata.create_all(engine)
BEGIN ...
... name="spongebob",
... fullname="Spongebob Squarepants",
... books=[
- ... Book(title="100 Years of Krabby Patties", summary="some long summary", cover_photo=b'binary_image_data'),
- ... Book(title="Sea Catch 22", summary="another long summary", cover_photo=b'binary_image_data'),
- ... Book(title="The Sea Grapes of Wrath", summary="yet another summary", cover_photo=b'binary_image_data'),
+ ... Book(
+ ... title="100 Years of Krabby Patties",
+ ... summary="some long summary",
+ ... cover_photo=b"binary_image_data",
+ ... ),
+ ... Book(
+ ... title="Sea Catch 22",
+ ... summary="another long summary",
+ ... cover_photo=b"binary_image_data",
+ ... ),
+ ... Book(
+ ... title="The Sea Grapes of Wrath",
+ ... summary="yet another summary",
+ ... cover_photo=b"binary_image_data",
+ ... ),
... ],
... ),
... User(
... name="sandy",
... fullname="Sandy Cheeks",
... books=[
- ... Book(title="A Nut Like No Other", summary="some long summary", cover_photo=b'binary_image_data'),
- ... Book(title="Geodesic Domes: A Retrospective", summary="another long summary", cover_photo=b'binary_image_data'),
- ... Book(title="Rocketry for Squirrels", summary="yet another summary", cover_photo=b'binary_image_data'),
+ ... Book(
+ ... title="A Nut Like No Other",
+ ... summary="some long summary",
+ ... cover_photo=b"binary_image_data",
+ ... ),
+ ... Book(
+ ... title="Geodesic Domes: A Retrospective",
+ ... summary="another long summary",
+ ... cover_photo=b"binary_image_data",
+ ... ),
+ ... Book(
+ ... title="Rocketry for Squirrels",
+ ... summary="yet another summary",
+ ... cover_photo=b"binary_image_data",
+ ... ),
... ],
... ),
... ]
>>>
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class User(Base):
... __tablename__ = "user_account"
... id: Mapped[int] = mapped_column(primary_key=True)
... fullname: Mapped[Optional[str]]
... species: Mapped[Optional[str]]
... addresses: Mapped[List["Address"]] = relationship(back_populates="user")
+ ...
... def __repr__(self) -> str:
... return f"User(name={self.name!r}, fullname={self.fullname!r})"
- ...
>>> class Address(Base):
... __tablename__ = "address"
... id: Mapped[int] = mapped_column(primary_key=True)
... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
... email_address: Mapped[str]
... user: Mapped[User] = relationship(back_populates="addresses")
+ ...
... def __repr__(self) -> str:
... return f"Address(email_address={self.email_address!r})"
- ...
>>> class LogRecord(Base):
... __tablename__ = "log_record"
... id: Mapped[int] = mapped_column(primary_key=True)
... message: Mapped[str]
... code: Mapped[str]
... timestamp: Mapped[datetime.datetime]
+ ...
... def __repr__(self):
... return f"LogRecord({self.message!r}, {self.code!r}, {self.timestamp!r})"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... type: Mapped[str]
+ ...
... def __repr__(self):
... return f"{self.__class__.__name__}({self.name!r})"
+ ...
... __mapper_args__ = {
... "polymorphic_identity": "employee",
... "polymorphic_on": "type",
... }
- ...
>>> class Manager(Employee):
... __tablename__ = "manager"
- ... id: Mapped[int] = mapped_column(
- ... ForeignKey("employee.id"), primary_key=True
- ... )
+ ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True)
... manager_name: Mapped[str]
+ ...
... def __repr__(self):
... return f"{self.__class__.__name__}({self.name!r}, manager_name={self.manager_name!r})"
+ ...
... __mapper_args__ = {
... "polymorphic_identity": "manager",
... }
- ...
>>> class Engineer(Employee):
... __tablename__ = "engineer"
- ... id: Mapped[int] = mapped_column(
- ... ForeignKey("employee.id"), primary_key=True
- ... )
+ ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True)
... engineer_info: Mapped[str]
+ ...
... def __repr__(self):
... return f"{self.__class__.__name__}({self.name!r}, engineer_info={self.engineer_info!r})"
+ ...
... __mapper_args__ = {
... "polymorphic_identity": "engineer",
... }
- ...
>>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True)
>>> Base.metadata.create_all(engine)
>>> session.close()
>>> conn.close()
- ...
>>>
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class Company(Base):
... __tablename__ = "company"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... employees: Mapped[list["Employee"]] = relationship(back_populates="company")
- ...
>>>
>>> class Employee(Base):
... __tablename__ = "employee"
... type: Mapped[str]
... company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
... company: Mapped[Company] = relationship(back_populates="employees")
+ ...
... def __repr__(self):
... return f"{self.__class__.__name__}({self.name!r})"
+ ...
... __mapper_args__ = {
... "polymorphic_identity": "employee",
... "polymorphic_on": "type",
... }
- ...
>>>
>>> class Manager(Employee):
... __tablename__ = "manager"
- ... id: Mapped[int] = mapped_column(
- ... ForeignKey("employee.id"), primary_key=True
- ... )
+ ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True)
... manager_name: Mapped[str]
... paperwork: Mapped[list["Paperwork"]] = relationship()
... __mapper_args__ = {
... "polymorphic_identity": "manager",
... }
- ...
>>> class Paperwork(Base):
... __tablename__ = "paperwork"
- ... id: Mapped[int] = mapped_column(
- ... primary_key=True
- ... )
- ... manager_id: Mapped[int] = mapped_column(ForeignKey('manager.id'))
+ ... id: Mapped[int] = mapped_column(primary_key=True)
+ ... manager_id: Mapped[int] = mapped_column(ForeignKey("manager.id"))
... document_name: Mapped[str]
+ ...
... def __repr__(self):
... return f"Paperwork({self.document_name!r})"
- ...
>>>
>>> class Engineer(Employee):
... __tablename__ = "engineer"
- ... id: Mapped[int] = mapped_column(
- ... ForeignKey("employee.id"), primary_key=True
- ... )
+ ... id: Mapped[int] = mapped_column(ForeignKey("employee.id"), primary_key=True)
... engineer_info: Mapped[str]
... __mapper_args__ = {
... "polymorphic_identity": "engineer",
... }
- ...
>>>
>>> engine = create_engine("sqlite://", echo=True)
>>>
>>> from sqlalchemy.orm import Session
>>> session = Session(conn)
>>> session.add(
- ... Company(
- ... name="Krusty Krab",
- ... employees=[
- ... Manager(
- ... name="Mr. Krabs", manager_name="Eugene H. Krabs",
- ... paperwork=[
- ... Paperwork(document_name="Secret Recipes"),
- ... Paperwork(document_name="Krabby Patty Orders"),
- ... ]
- ... ),
- ... Engineer(
- ... name="SpongeBob", engineer_info="Krabby Patty Master"
- ... ),
- ... Engineer(name="Squidward", engineer_info="Senior Customer Engagement Engineer"),
- ... ],
- ... )
+ ... Company(
+ ... name="Krusty Krab",
+ ... employees=[
+ ... Manager(
+ ... name="Mr. Krabs",
+ ... manager_name="Eugene H. Krabs",
+ ... paperwork=[
+ ... Paperwork(document_name="Secret Recipes"),
+ ... Paperwork(document_name="Krabby Patty Orders"),
+ ... ],
+ ... ),
+ ... Engineer(name="SpongeBob", engineer_info="Krabby Patty Master"),
+ ... Engineer(
+ ... name="Squidward",
+ ... engineer_info="Senior Customer Engagement Engineer",
+ ... ),
+ ... ],
... )
+ ... )
>>> session.commit()
- ...
BEGIN ...
>>>
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class User(Base):
... __tablename__ = "user_account"
... id: Mapped[int] = mapped_column(primary_key=True)
... fullname: Mapped[Optional[str]]
... addresses: Mapped[List["Address"]] = relationship(back_populates="user")
... orders: Mapped[List["Order"]] = relationship()
+ ...
... def __repr__(self) -> str:
... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
- ...
>>> class Address(Base):
... __tablename__ = "address"
... id: Mapped[int] = mapped_column(primary_key=True)
... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
... email_address: Mapped[str]
... user: Mapped[User] = relationship(back_populates="addresses")
+ ...
... def __repr__(self) -> str:
... return f"Address(id={self.id!r}, email_address={self.email_address!r})"
- ...
>>> order_items_table = Table(
... "order_items",
... Base.metadata,
... id: Mapped[int] = mapped_column(primary_key=True)
... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
... items: Mapped[List["Item"]] = relationship(secondary=order_items_table)
- ...
>>> class Item(Base):
... __tablename__ = "item"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... description: Mapped[str]
- ...
>>> engine = create_engine("sqlite+pysqlite:///:memory:", echo=True)
>>> Base.metadata.create_all(engine)
BEGIN ...
>>>
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class Employee(Base):
... __tablename__ = "employee"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... type: Mapped[str]
+ ...
... def __repr__(self):
... return f"{self.__class__.__name__}({self.name!r})"
+ ...
... __mapper_args__ = {
... "polymorphic_identity": "employee",
... "polymorphic_on": "type",
... }
- ...
>>> class Manager(Employee):
... manager_name: Mapped[str] = mapped_column(nullable=True)
... __mapper_args__ = {
... "polymorphic_identity": "manager",
... }
- ...
>>> class Engineer(Employee):
... engineer_info: Mapped[str] = mapped_column(nullable=True)
... __mapper_args__ = {
... "polymorphic_identity": "engineer",
... }
- ...
>>>
>>> engine = create_engine("sqlite://", echo=True)
>>>
>>> from sqlalchemy.orm import Session
>>> session = Session(conn)
>>> session.add_all(
- ... [
- ... Manager(
- ... name="Mr. Krabs", manager_name="Eugene H. Krabs",
- ... ),
- ... Engineer(
- ... name="SpongeBob", engineer_info="Krabby Patty Master"
- ... ),
- ... Engineer(name="Squidward", engineer_info="Senior Customer Engagement Engineer"),
- ... ],
- ... )
+ ... [
+ ... Manager(
+ ... name="Mr. Krabs",
+ ... manager_name="Eugene H. Krabs",
+ ... ),
+ ... Engineer(name="SpongeBob", engineer_info="Krabby Patty Master"),
+ ... Engineer(
+ ... name="Squidward",
+ ... engineer_info="Senior Customer Engagement Engineer",
+ ... ),
+ ... ],
+ ... )
>>> session.commit()
- ...
BEGIN ...
returned is a list of dictionaries::
>>> from pprint import pprint
- >>> user_alias = aliased(User, name='user2')
+ >>> user_alias = aliased(User, name="user2")
>>> stmt = select(User, User.id, user_alias)
>>> pprint(stmt.column_descriptions)
[{'aliased': False,
``Book``, we would make use of two separate options::
>>> stmt = (
- ... select(User, Book).
- ... join_from(User, Book).
- ... options(load_only(User.name), load_only(Book.title))
+ ... select(User, Book)
+ ... .join_from(User, Book)
+ ... .options(load_only(User.name), load_only(Book.title))
... )
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, book.id AS id_1, book.title
>>> from sqlalchemy.orm import selectinload
>>> stmt = select(User).options(selectinload(User.books).load_only(Book.title))
>>> for user in session.scalars(stmt):
- ... print(f"{user.fullname} {[b.title for b in user.books]}")
+ ... print(f"{user.fullname} {[b.title for b in user.books]}")
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
[...] ()
>>> from sqlalchemy.orm import defaultload
>>> stmt = select(User).options(defaultload(User.books).load_only(Book.title))
>>> for user in session.scalars(stmt):
- ... print(f"{user.fullname} {[b.title for b in user.books]}")
+ ... print(f"{user.fullname} {[b.title for b in user.books]}")
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
[...] ()
access::
>>> book = session.scalar(
- ... select(Book).
- ... options(defer(Book.cover_photo, raiseload=True)).
- ... where(Book.id == 4)
+ ... select(Book).options(defer(Book.cover_photo, raiseload=True)).where(Book.id == 4)
... )
{opensql}SELECT book.id, book.owner_id, book.title, book.summary
FROM book
>>> session.expunge_all()
>>> book = session.scalar(
- ... select(Book).
- ... options(load_only(Book.title, raiseload=True)).
- ... where(Book.id == 5)
+ ... select(Book).options(load_only(Book.title, raiseload=True)).where(Book.id == 5)
... )
{opensql}SELECT book.id, book.title
FROM book
>>> class Base(DeclarativeBase):
... pass
- ...
The functionality of :func:`_orm.defer` is available as a default behavior for
mapped columns, as may be appropriate for columns that should not be loaded
... title: Mapped[str]
... summary: Mapped[str] = mapped_column(Text, deferred=True)
... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True)
+ ...
... def __repr__(self) -> str:
... return f"Book(id={self.id!r}, title={self.title!r})"
Using the above mapping, queries against ``Book`` will automatically not
include the ``summary`` and ``cover_photo`` columns::
- >>> book = session.scalar(
- ... select(Book).where(Book.id == 2)
- ... )
+ >>> book = session.scalar(select(Book).where(Book.id == 2))
{opensql}SELECT book.id, book.owner_id, book.title
FROM book
WHERE book.id = ?
mapper_registry = registry()
book_table = Table(
- 'book',
+ "book",
mapper_registry.metadata,
- Column('id', Integer, primary_key=True),
- Column('title', String(50)),
- Column('summary', Text),
- Column('cover_image', Blob)
+ Column("id", Integer, primary_key=True),
+ Column("title", String(50)),
+ Column("summary", Text),
+ Column("cover_image", Blob),
)
+
class Book:
pass
+
mapper_registry.map_imperatively(
Book,
book_table,
properties={
"summary": deferred(book_table.c.summary),
"cover_image": deferred(book_table.c.cover_image),
- }
+ },
)
:func:`_orm.deferred` may also be used in place of :func:`_orm.column_property`
from sqlalchemy.orm import deferred
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
firstname: Mapped[str] = mapped_column()
as deferred::
>>> from sqlalchemy.orm import undefer
- >>> book = session.scalar(
- ... select(Book).where(Book.id == 2).options(undefer(Book.summary))
- ... )
+ >>> book = session.scalar(select(Book).where(Book.id == 2).options(undefer(Book.summary)))
{opensql}SELECT book.summary, book.id, book.owner_id, book.title
FROM book
WHERE book.id = ?
>>> class Base(DeclarativeBase):
... pass
- ...
Normally when a column is mapped with ``mapped_column(deferred=True)``, when
the deferred attribute is accessed on an object, SQL will be emitted to load
... id: Mapped[int] = mapped_column(primary_key=True)
... owner_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
... title: Mapped[str]
- ... summary: Mapped[str] = mapped_column(Text, deferred=True, deferred_group="book_attrs")
- ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True, deferred_group="book_attrs")
+ ... summary: Mapped[str] = mapped_column(
+ ... Text, deferred=True, deferred_group="book_attrs"
+ ... )
+ ... cover_photo: Mapped[bytes] = mapped_column(
+ ... LargeBinary, deferred=True, deferred_group="book_attrs"
+ ... )
+ ...
... def __repr__(self) -> str:
... return f"Book(id={self.id!r}, title={self.title!r})"
Using the above mapping, accessing either ``summary`` or ``cover_photo``
will load both columns at once using just one SELECT statement::
- >>> book = session.scalar(
- ... select(Book).where(Book.id == 2)
- ... )
+ >>> book = session.scalar(select(Book).where(Book.id == 2))
{opensql}SELECT book.id, book.owner_id, book.title
FROM book
WHERE book.id = ?
>>> from sqlalchemy.orm import undefer_group
>>> book = session.scalar(
- ... select(Book).where(Book.id == 2).options(undefer_group("book_attrs"))
+ ... select(Book).where(Book.id == 2).options(undefer_group("book_attrs"))
... )
{opensql}SELECT book.summary, book.cover_photo, book.id, book.owner_id, book.title
FROM book
columns can be undeferred at once, without using a group name, by indicating
a wildcard::
- >>> book = session.scalar(
- ... select(Book).where(Book.id == 3).options(undefer("*"))
- ... )
+ >>> book = session.scalar(select(Book).where(Book.id == 3).options(undefer("*")))
{opensql}SELECT book.summary, book.cover_photo, book.id, book.owner_id, book.title
FROM book
WHERE book.id = ?
>>> class Base(DeclarativeBase):
... pass
- ...
The "raiseload" behavior first introduced at :ref:`orm_queryguide_deferred_raiseload` may
also be applied as a default mapper-level behavior, using the
... owner_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
... title: Mapped[str]
... summary: Mapped[str] = mapped_column(Text, deferred=True, deferred_raiseload=True)
- ... cover_photo: Mapped[bytes] = mapped_column(LargeBinary, deferred=True, deferred_raiseload=True)
+ ... cover_photo: Mapped[bytes] = mapped_column(
+ ... LargeBinary, deferred=True, deferred_raiseload=True
+ ... )
+ ...
... def __repr__(self) -> str:
... return f"Book(id={self.id!r}, title={self.title!r})"
Using the above mapping, the ``.summary`` and ``.cover_photo`` columns are
by default not loadable::
- >>> book = session.scalar(
- ... select(Book).where(Book.id == 2)
- ... )
+ >>> book = session.scalar(select(Book).where(Book.id == 2))
{opensql}SELECT book.id, book.owner_id, book.title
FROM book
WHERE book.id = ?
:ref:`orm_queryguide_populate_existing` to refresh the already-loaded object's loader options::
>>> book = session.scalar(
- ... select(Book).
- ... where(Book.id == 2).
- ... options(undefer('*')).
- ... execution_options(populate_existing=True)
+ ... select(Book)
+ ... .where(Book.id == 2)
+ ... .options(undefer("*"))
+ ... .execution_options(populate_existing=True)
... )
{opensql}SELECT book.summary, book.cover_photo, book.id, book.owner_id, book.title
FROM book
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class User(Base):
... __tablename__ = "user_account"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... fullname: Mapped[Optional[str]]
... books: Mapped[List["Book"]] = relationship(back_populates="owner")
+ ...
... def __repr__(self) -> str:
... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
- ...
>>> class Book(Base):
... __tablename__ = "book"
... id: Mapped[int] = mapped_column(primary_key=True)
... summary: Mapped[str] = mapped_column(Text)
... cover_photo: Mapped[bytes] = mapped_column(LargeBinary)
... owner: Mapped["User"] = relationship(back_populates="books")
+ ...
... def __repr__(self) -> str:
... return f"Book(id={self.id!r}, title={self.title!r})"
entries, one for ``User`` and one for ``func.count(Book.id)``::
>>> from sqlalchemy import func
- >>> stmt = (
- ... select(User, func.count(Book.id)).
- ... join_from(User, Book).
- ... group_by(Book.owner_id)
- ... )
+ >>> stmt = select(User, func.count(Book.id)).join_from(User, Book).group_by(Book.owner_id)
>>> for user, book_count in session.execute(stmt):
- ... print(f"Username: {user.name} Number of books: {book_count}")
+ ... print(f"Username: {user.name} Number of books: {book_count}")
{opensql}SELECT user_account.id, user_account.name, user_account.fullname,
count(book.id) AS count_1
FROM user_account JOIN book ON user_account.id = book.owner_id
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class Book(Base):
... __tablename__ = "book"
... id: Mapped[int] = mapped_column(primary_key=True)
... title: Mapped[str]
... summary: Mapped[str] = mapped_column(Text)
... cover_photo: Mapped[bytes] = mapped_column(LargeBinary)
+ ...
... def __repr__(self) -> str:
... return f"Book(id={self.id!r}, title={self.title!r})"
... name: Mapped[str]
... fullname: Mapped[Optional[str]]
... book_count: Mapped[int] = query_expression()
+ ...
... def __repr__(self) -> str:
... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
- ...
With the ``User.book_count`` attribute configured in our mapping, we may populate
it with data from a SQL expression using the
>>> from sqlalchemy.orm import with_expression
>>> stmt = (
- ... select(User).
- ... join_from(User, Book).
- ... group_by(Book.owner_id).
- ... options(with_expression(User.book_count, func.count(Book.id)))
+ ... select(User)
+ ... .join_from(User, Book)
+ ... .group_by(Book.owner_id)
+ ... .options(with_expression(User.book_count, func.count(Book.id)))
... )
>>> for user in session.scalars(stmt):
- ... print(f"Username: {user.name} Number of books: {user.book_count}")
+ ... print(f"Username: {user.name} Number of books: {user.book_count}")
{opensql}SELECT count(book.id) AS count_1, user_account.id, user_account.name,
user_account.fullname
FROM user_account JOIN book ON user_account.id = book.owner_id
# load the same A with an option; expression will **not** be applied
# to the already-loaded object
- obj = session.scalars(
- select(A).options(with_expression(A.expr, some_expr))
- ).first()
+ obj = session.scalars(select(A).options(with_expression(A.expr, some_expr))).first()
To ensure the attribute is re-loaded on an existing object, use the
:ref:`orm_queryguide_populate_existing` execution option to ensure
.. sourcecode:: python
obj = session.scalars(
- select(A).
- options(with_expression(A.expr, some_expr)).
- execution_options(populate_existing=True)
+ select(A)
+ .options(with_expression(A.expr, some_expr))
+ .execution_options(populate_existing=True)
).first()
* The :func:`_orm.with_expression` SQL expression **is lost when the object is
.. sourcecode:: python
# can't refer to A.expr elsewhere in the query
- stmt = select(A).options(
- with_expression(A.expr, A.x + A.y)
- ).filter(A.expr > 5).order_by(A.expr)
+ stmt = (
+ select(A)
+ .options(with_expression(A.expr, A.x + A.y))
+ .filter(A.expr > 5)
+ .order_by(A.expr)
+ )
The ``A.expr`` expression will resolve to NULL in the above WHERE clause
and ORDER BY clause. To use the expression throughout the query, assign to a
# assign desired expression up front, then refer to that in
# the query
a_expr = A.x + A.y
- stmt = select(A).options(
- with_expression(A.expr, a_expr)
- ).filter(a_expr > 5).order_by(a_expr)
+ stmt = (
+ select(A)
+ .options(with_expression(A.expr, a_expr))
+ .filter(a_expr > 5)
+ .order_by(a_expr)
+ )
.. seealso::
>>> session.close()
>>> conn.close()
- ROLLBACK...
\ No newline at end of file
+ ROLLBACK...
>>> session.execute(
... insert(User),
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
+ ... {"name": "sandy", "fullname": "Sandy Cheeks"},
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... {"name": "squidward", "fullname": "Squidward Tentacles"},
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
+ ... ],
... )
{opensql}INSERT INTO user_account (name, fullname) VALUES (?, ?)
[...] [('spongebob', 'Spongebob Squarepants'), ('sandy', 'Sandy Cheeks'), ('patrick', 'Patrick Star'),
.. Setup code, not for display
- >>> session.rollback(); session.connection()
+ >>> session.rollback()
ROLLBACK...
+ >>> session.connection()
+ BEGIN (implicit)...
The bulk ORM insert feature supports INSERT..RETURNING for selected
backends, which can return a :class:`.Result` object that may yield individual
>>> users = session.scalars(
... insert(User).returning(User),
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
+ ... {"name": "sandy", "fullname": "Sandy Cheeks"},
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... {"name": "squidward", "fullname": "Squidward Tentacles"},
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
+ ... ],
... )
{opensql}INSERT INTO user_account (name, fullname)
VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?) RETURNING id, name, fullname, species
.. Setup code, not for display
- >>> session.rollback(); session.connection()
+ >>> session.rollback()
ROLLBACK...
+ >>> session.connection()
+ BEGIN (implicit)...
The ORM bulk insert feature supports lists of parameter dictionaries that are
"heterogenous", which basically means "individual dictionaries can have different
>>> users = session.scalars(
... insert(User).returning(User),
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants", "species": "Sea Sponge"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"},
- ... {"name": "patrick", "species": "Starfish"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles", "species": "Squid"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"},
- ... ]
+ ... {
+ ... "name": "spongebob",
+ ... "fullname": "Spongebob Squarepants",
+ ... "species": "Sea Sponge",
+ ... },
+ ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"},
+ ... {"name": "patrick", "species": "Starfish"},
+ ... {
+ ... "name": "squidward",
+ ... "fullname": "Squidward Tentacles",
+ ... "species": "Squid",
+ ... },
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"},
+ ... ],
... )
{opensql}INSERT INTO user_account (name, fullname, species) VALUES (?, ?, ?), (?, ?, ?) RETURNING id, name, fullname, species
[... (insertmanyvalues)] ('spongebob', 'Spongebob Squarepants', 'Sea Sponge', 'sandy', 'Sandy Cheeks', 'Squirrel')
.. Setup code, not for display
- >>> session.rollback(); session.connection()
+ >>> session.rollback()
ROLLBACK
+ >>> session.connection()
BEGIN...
ORM bulk insert builds upon the internal system that is used by the
>>> managers = session.scalars(
... insert(Manager).returning(Manager),
... [
- ... {"name": "sandy", "manager_name": "Sandy Cheeks"},
- ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "sandy", "manager_name": "Sandy Cheeks"},
+ ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"},
+ ... ],
... )
{opensql}INSERT INTO employee (name, type) VALUES (?, ?), (?, ?) RETURNING id, name, type
[... (insertmanyvalues)] ('sandy', 'manager', 'ehkrabs', 'manager')
import datetime
+
class LogRecord(Base):
__tablename__ = "log_record"
id: Mapped[int] = mapped_column(primary_key=True)
>>> from sqlalchemy import func
>>> log_record_result = session.scalars(
... insert(LogRecord).values(code="SQLA", timestamp=func.now()).returning(LogRecord),
- ... [
- ... {"message": "log message #1"},
- ... {"message": "log message #2"},
- ... {"message": "log message #3"},
- ... {"message": "log message #4"},
- ... ]
+ ... [
+ ... {"message": "log message #1"},
+ ... {"message": "log message #2"},
+ ... {"message": "log message #3"},
+ ... {"message": "log message #4"},
+ ... ],
... )
{opensql}INSERT INTO log_record (message, code, timestamp)
VALUES (?, ?, CURRENT_TIMESTAMP), (?, ?, CURRENT_TIMESTAMP), (?, ?, CURRENT_TIMESTAMP),
>>> session.execute(
... insert(User),
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants", "species": "Sea Sponge"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"},
- ... {"name": "patrick", "species": "Starfish"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles", "species": "Squid"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"},
- ... ]
+ ... {
+ ... "name": "spongebob",
+ ... "fullname": "Spongebob Squarepants",
+ ... "species": "Sea Sponge",
+ ... },
+ ... {"name": "sandy", "fullname": "Sandy Cheeks", "species": "Squirrel"},
+ ... {"name": "patrick", "species": "Starfish"},
+ ... {
+ ... "name": "squidward",
+ ... "fullname": "Squidward Tentacles",
+ ... "species": "Squid",
+ ... },
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs", "species": "Crab"},
+ ... ],
... )
BEGIN...
>>> from sqlalchemy import select
>>> address_result = session.scalars(
- ... insert(Address).values(
- ... [
- ... {
- ... "user_id": select(User.id).where(User.name == 'sandy'),
- ... "email_address": "sandy@company.com"
- ... },
- ... {
- ... "user_id": select(User.id).where(User.name == 'spongebob'),
- ... "email_address": "spongebob@company.com"
- ... },
- ... {
- ... "user_id": select(User.id).where(User.name == 'patrick'),
- ... "email_address": "patrick@company.com"
- ... },
- ... ]
- ... ).returning(Address),
+ ... insert(Address)
+ ... .values(
+ ... [
+ ... {
+ ... "user_id": select(User.id).where(User.name == "sandy"),
+ ... "email_address": "sandy@company.com",
+ ... },
+ ... {
+ ... "user_id": select(User.id).where(User.name == "spongebob"),
+ ... "email_address": "spongebob@company.com",
+ ... },
+ ... {
+ ... "user_id": select(User.id).where(User.name == "patrick"),
+ ... "email_address": "patrick@company.com",
+ ... },
+ ... ]
+ ... )
+ ... .returning(Address),
... )
{opensql}INSERT INTO address (user_id, email_address) VALUES
((SELECT user_account.id
Code which makes use of :meth:`.Session.bulk_insert_mappings` for example
can port code as follows, starting with this mappings example::
- session.bulk_insert_mappings(
- User,
- [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}]
- )
+ session.bulk_insert_mappings(User, [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}])
The above is expressed using the new API as::
from sqlalchemy import insert
- session.execute(
- insert(User),
- [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}]
- )
+
+ session.execute(insert(User), [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}])
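If the older code relied upon the ``return_defaults=True`` flag to receive
newly generated primary key values, a comparable sketch in the new style uses
RETURNING, as illustrated earlier in this chapter::

    ids = session.scalars(
        insert(User).returning(User.id),
        [{"name": "u1"}, {"name": "u2"}, {"name": "u3"}],
    ).all()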
.. seealso::
.. Setup code, not for display
- >>> session.rollback();
+ >>> session.rollback()
ROLLBACK
- >>> session.execute(insert(User).values(
- ... [
- ... dict(name="sandy"),
- ... dict(name="spongebob", fullname="Spongebob Squarepants"),
- ... ]
- ... ))
+ >>> session.execute(
+ ... insert(User).values(
+ ... [
+ ... dict(name="sandy"),
+ ... dict(name="spongebob", fullname="Spongebob Squarepants"),
+ ... ]
+ ... )
+ ... )
BEGIN...
::
>>> from sqlalchemy.dialects.sqlite import insert as sqlite_upsert
>>> stmt = sqlite_upsert(User).values(
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
+ ... {"name": "sandy", "fullname": "Sandy Cheeks"},
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... {"name": "squidward", "fullname": "Squidward Tentacles"},
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
+ ... ]
... )
>>> stmt = stmt.on_conflict_do_update(
- ... index_elements=[User.name],
- ... set_=dict(fullname=stmt.excluded.fullname)
+ ... index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname)
... )
>>> session.execute(stmt)
{opensql}INSERT INTO user_account (name, fullname)
relevant ORM entity class may be passed. Continuing from the
example in the previous section::
- >>> result = session.scalars(stmt.returning(User), execution_options={"populate_existing": True})
+ >>> result = session.scalars(
+ ... stmt.returning(User), execution_options={"populate_existing": True}
+ ... )
{opensql}INSERT INTO user_account (name, fullname)
VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)
ON CONFLICT (name) DO UPDATE SET fullname = excluded.fullname
.. Setup code, not for display
- >>> session.rollback();
+ >>> session.rollback()
ROLLBACK
>>> session.execute(
... insert(User),
... [
- ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
- ... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"},
- ... {"name": "squidward", "fullname": "Squidward Tentacles"},
- ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"name": "spongebob", "fullname": "Spongebob Squarepants"},
+ ... {"name": "sandy", "fullname": "Sandy Cheeks"},
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... {"name": "squidward", "fullname": "Squidward Tentacles"},
+ ... {"name": "ehkrabs", "fullname": "Eugene H. Krabs"},
+ ... ],
... )
BEGIN ...
- >>> session.commit(); session.connection()
+ >>> session.commit()
COMMIT...
+ >>> session.connection()
+ BEGIN ...
The :class:`_dml.Update` construct may be used with
:meth:`_orm.Session.execute` in a similar way as the :class:`_dml.Insert`
>>> session.execute(
... update(User),
... [
- ... {"id": 1, "fullname": "Spongebob Squarepants"},
- ... {"id": 3, "fullname": "Patrick Star"},
- ... {"id": 5, "fullname": "Eugene H. Krabs"},
- ... ]
+ ... {"id": 1, "fullname": "Spongebob Squarepants"},
+ ... {"id": 3, "fullname": "Patrick Star"},
+ ... {"id": 5, "fullname": "Eugene H. Krabs"},
+ ... ],
... )
{opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ?
[...] [('Spongebob Squarepants', 1), ('Patrick Star', 3), ('Eugene H. Krabs', 5)]
>>> session.execute(
... insert(Manager).returning(Manager),
... [
- ... {"name": "sandy", "manager_name": "Sandy Cheeks"},
- ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"},
- ... ]
- ... ); session.commit(); session.connection()
+ ... {"name": "sandy", "manager_name": "Sandy Cheeks"},
+ ... {"name": "ehkrabs", "manager_name": "Eugene H. Krabs"},
+ ... ],
+ ... )
INSERT...
+ >>> session.commit()
+ COMMIT...
+ >>> session.connection()
+ BEGIN (implicit)...
ORM bulk update has similar behavior to ORM bulk insert when using mappings
with joined table inheritance; as described at
>>> session.execute(
... update(Manager),
... [
- ... {"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"},
- ... {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"},
- ... ]
+ ... {
+ ... "id": 1,
+ ... "name": "scheeks",
+ ... "manager_name": "Sandy Cheeks, President",
+ ... },
+ ... {
+ ... "id": 2,
+ ... "name": "eugene",
+ ... "manager_name": "Eugene H. Krabs, VP Marketing",
+ ... },
+ ... ],
... )
{opensql}UPDATE employee SET name=? WHERE employee.id = ?
[...] [('scheeks', 1), ('eugene', 2)]
The example below::
session.bulk_update_mappings(
- User,
- [
- {"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"},
- {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"},
- ]
+ User,
+ [
+ {"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"},
+ {"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"},
+ ],
)
Is expressed using the new API as::
from sqlalchemy import update
+
session.execute(
update(User),
[
{"id": 1, "name": "scheeks", "manager_name": "Sandy Cheeks, President"},
{"id": 2, "name": "eugene", "manager_name": "Eugene H. Krabs, VP Marketing"},
- ]
+ ],
)
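Note that each dictionary in this form must carry the primary key of the row
being updated, since the operation addresses existing rows; a parameter set
without it is expected to raise (a sketch)::

    # expected to raise - no primary key value to locate the target row
    session.execute(update(User), [{"name": "scheeks"}])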
.. seealso::
.. Setup code, not for display
- >>> session.rollback(); session.connection()
+ >>> session.rollback()
ROLLBACK...
+ >>> session.connection()
+ BEGIN (implicit)...
The :class:`_dml.Update` and :class:`_dml.Delete` constructs, when constructed
with custom WHERE criteria (that is, using the :meth:`_dml.Update.where` and
::
>>> from sqlalchemy import update
- >>> stmt = update(User).where(User.name.in_(["squidward", "sandy"])).values(fullname="Name starts with S")
+ >>> stmt = (
+ ... update(User)
+ ... .where(User.name.in_(["squidward", "sandy"]))
+ ... .values(fullname="Name starts with S")
+ ... )
>>> session.execute(stmt)
{opensql}UPDATE user_account SET fullname=? WHERE user_account.name IN (?, ?)
[...] ('Name starts with S', 'squidward', 'sandy')
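The same pattern applies to DELETE; a sketch of the equivalent
:func:`_sql.delete` form::

    from sqlalchemy import delete

    stmt = delete(User).where(User.name.in_(["squidward", "sandy"]))
    session.execute(stmt)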
.. Setup code, not for display
- >>> session.rollback(); session.connection()
+ >>> session.rollback()
ROLLBACK...
+ >>> session.connection()
+ BEGIN (implicit)...
.. _orm_queryguide_update_delete_sync:
>>> from sqlalchemy import update
>>> stmt = (
- ... update(User).
- ... where(User.name == "squidward").
- ... values(fullname="Squidward Tentacles")
+ ... update(User).where(User.name == "squidward").values(fullname="Squidward Tentacles")
... )
>>> session.execute(stmt, execution_options={"synchronize_session": False})
{opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ?
>>> from sqlalchemy import update
>>> stmt = (
- ... update(User).
- ... where(User.name == "squidward").
- ... values(fullname="Squidward Tentacles").
- ... execution_options(synchronize_session=False)
+ ... update(User)
+ ... .where(User.name == "squidward")
+ ... .values(fullname="Squidward Tentacles")
+ ... .execution_options(synchronize_session=False)
... )
>>> session.execute(stmt)
{opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ?
>>> from sqlalchemy import update
>>> stmt = (
- ... update(User).
- ... where(User.name == "squidward").
- ... values(fullname="Squidward Tentacles").
- ... returning(User)
+ ... update(User)
+ ... .where(User.name == "squidward")
+ ... .values(fullname="Squidward Tentacles")
+ ... .returning(User)
... )
>>> result = session.scalars(stmt)
{opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ?
.. Setup code, not for display
- >>> session.rollback(); session.connection()
+ >>> session.rollback()
ROLLBACK...
+ >>> session.connection()
+ BEGIN (implicit)...
The UPDATE/DELETE with WHERE criteria feature, unlike the
:ref:`orm_queryguide_bulk_update`, only emits a single UPDATE or DELETE
>>> stmt = (
- ... update(Manager).
- ... where(Manager.id == 1).
- ... values(manager_name="Sandy Cheeks, President")
+ ... update(Manager)
+ ... .where(Manager.id == 1)
+ ... .values(manager_name="Sandy Cheeks, President")
... )
>>> session.execute(stmt)
UPDATE manager SET manager_name=? WHERE manager.id = ?
to locate rows which will work on any SQL backend is to use a subquery::
>>> stmt = (
- ... update(Manager).
- ... where(
- ... Manager.id ==
- ... select(Employee.id).
- ... where(Employee.name == "sandy").scalar_subquery()
- ... ).
- ... values(manager_name="Sandy Cheeks, President")
+ ... update(Manager)
+ ... .where(
+ ... Manager.id
+ ... == select(Employee.id).where(Employee.name == "sandy").scalar_subquery()
+ ... )
+ ... .values(manager_name="Sandy Cheeks, President")
... )
>>> session.execute(stmt)
{opensql}UPDATE manager SET manager_name=? WHERE manager.id = (SELECT employee.id
tables must be stated explicitly in some way::
>>> stmt = (
- ... update(Manager).
- ... where(
- ... Manager.id == Employee.id,
- ... Employee.name == "sandy"
- ... ).
- ... values(manager_name="Sandy Cheeks, President")
+ ... update(Manager)
+ ... .where(Manager.id == Employee.id, Employee.name == "sandy")
+ ... .values(manager_name="Sandy Cheeks, President")
... )
>>> session.execute(stmt)
{opensql}UPDATE manager SET manager_name=? FROM employee
.. Setup code, not for display
- >>> session.close(); conn.close()
- ROLLBACK
+ >>> session.close()
+ ROLLBACK...
+ >>> conn.close()
>>> from sqlalchemy.orm import selectinload
>>> from sqlalchemy.orm import selectin_polymorphic
- >>> stmt = select(Employee).order_by(Employee.id).options(
- ... selectin_polymorphic(Employee, [Manager, Engineer]),
- ... selectinload(Manager.paperwork)
+ >>> stmt = (
+ ... select(Employee)
+ ... .order_by(Employee.id)
+ ... .options(
+ ... selectin_polymorphic(Employee, [Manager, Engineer]),
+ ... selectinload(Manager.paperwork),
+ ... )
... )
{opensql}>>> objects = session.scalars(stmt).all()
BEGIN (implicit)
the previous loader option (in this case :func:`_orm.selectinload`), so
we only indicate the additional target subclasses we wish to load::
- >>> stmt = (
- ... select(Company).
- ... options(selectinload(Company.employees).selectin_polymorphic([Manager, Engineer]))
+ >>> stmt = select(Company).options(
+ ... selectinload(Company.employees).selectin_polymorphic([Manager, Engineer])
... )
>>> for company in session.scalars(stmt):
- ... print(f"company: {company.name}")
- ... print(f"employees: {company.employees}")
+ ... print(f"company: {company.name}")
+ ... print(f"employees: {company.employees}")
{opensql}SELECT company.id, company.name
FROM company
[...] ()
.. sourcecode:: python
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(50))
type = mapped_column(String(50))
- __mapper_args__ = {
- 'polymorphic_identity': 'employee',
- 'polymorphic_on': type
- }
+ __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type}
+
class Engineer(Employee):
- __tablename__ = 'engineer'
- id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "engineer"
+ id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_info = mapped_column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'selectin',
- 'polymorphic_identity': 'engineer',
+ "polymorphic_load": "selectin",
+ "polymorphic_identity": "engineer",
}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_name = mapped_column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'selectin',
- 'polymorphic_identity': 'manager',
+ "polymorphic_load": "selectin",
+ "polymorphic_identity": "manager",
}
With the above mapping, SELECT statements against the ``Employee`` class will
>>> from sqlalchemy import or_
>>> employee_poly = with_polymorphic(Employee, [Engineer, Manager])
>>> stmt = (
- ... select(employee_poly).
- ... where(
- ... or_(
- ... employee_poly.Manager.manager_name == "Eugene H. Krabs",
- ... employee_poly.Engineer.engineer_info == "Senior Customer Engagement Engineer"
- ... )
- ... ).
- ... order_by(employee_poly.id)
+ ... select(employee_poly)
+ ... .where(
+ ... or_(
+ ... employee_poly.Manager.manager_name == "Eugene H. Krabs",
+ ... employee_poly.Engineer.engineer_info
+ ... == "Senior Customer Engagement Engineer",
+ ... )
+ ... )
+ ... .order_by(employee_poly.id)
... )
>>> objects = session.scalars(stmt).all()
{opensql}SELECT employee.id, employee.name, employee.type, employee.company_id, manager.id AS id_1,
>>> manager_employee = with_polymorphic(Employee, [Manager], aliased=True, flat=True)
>>> engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True, flat=True)
>>> stmt = (
- ... select(manager_employee, engineer_employee).
- ... join(
+ ... select(manager_employee, engineer_employee)
+ ... .join(
... engineer_employee,
... engineer_employee.company_id == manager_employee.company_id,
- ... ).
- ... where(
+ ... )
+ ... .where(
... or_(
... manager_employee.name == "Mr. Krabs",
- ... manager_employee.Manager.manager_name == "Eugene H. Krabs"
+ ... manager_employee.Manager.manager_name == "Eugene H. Krabs",
... )
- ... ).
- ... order_by(engineer_employee.name, manager_employee.name)
+ ... )
+ ... .order_by(engineer_employee.name, manager_employee.name)
... )
>>> for manager, engineer in session.execute(stmt):
... print(f"{manager} {engineer}")
>>> manager_employee = with_polymorphic(Employee, [Manager], aliased=True)
>>> engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True)
>>> stmt = (
- ... select(manager_employee, engineer_employee).
- ... join(
+ ... select(manager_employee, engineer_employee)
+ ... .join(
... engineer_employee,
... engineer_employee.company_id == manager_employee.company_id,
- ... ).
- ... where(
+ ... )
+ ... .where(
... or_(
... manager_employee.name == "Mr. Krabs",
- ... manager_employee.Manager.manager_name == "Eugene H. Krabs"
+ ... manager_employee.Manager.manager_name == "Eugene H. Krabs",
... )
- ... ).
- ... order_by(engineer_employee.name, manager_employee.name)
+ ... )
+ ... .order_by(engineer_employee.name, manager_employee.name)
... )
>>> print(stmt)
{opensql}SELECT anon_1.employee_id, anon_1.employee_name, anon_1.employee_type,
.. sourcecode:: python
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(50))
type = mapped_column(String(50))
- __mapper_args__ = {
- 'polymorphic_identity': 'employee',
- 'polymorphic_on': type
- }
+ __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type}
+
class Engineer(Employee):
- __tablename__ = 'engineer'
- id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "engineer"
+ id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_info = mapped_column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'inline',
- 'polymorphic_identity': 'engineer',
+ "polymorphic_load": "inline",
+ "polymorphic_identity": "engineer",
}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_name = mapped_column(String(30))
__mapper_args__ = {
- 'polymorphic_load': 'inline',
- 'polymorphic_identity': 'manager',
+ "polymorphic_load": "inline",
+ "polymorphic_identity": "manager",
}
With the above mapping, SELECT statements against the ``Employee`` class will
.. sourcecode:: python
class Employee(Base):
- __tablename__ = 'employee'
+ __tablename__ = "employee"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(50))
type = mapped_column(String(50))
__mapper_args__ = {
- 'polymorphic_identity': 'employee',
- 'with_polymorphic': '*',
- 'polymorphic_on': type
+ "polymorphic_identity": "employee",
+ "with_polymorphic": "*",
+ "polymorphic_on": type,
}
+
class Engineer(Employee):
- __tablename__ = 'engineer'
- id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "engineer"
+ id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True)
engineer_info = mapped_column(String(30))
__mapper_args__ = {
- 'polymorphic_identity': 'engineer',
+ "polymorphic_identity": "engineer",
}
+
class Manager(Employee):
- __tablename__ = 'manager'
- id = mapped_column(Integer, ForeignKey('employee.id'), primary_key=True)
+ __tablename__ = "manager"
+ id = mapped_column(Integer, ForeignKey("employee.id"), primary_key=True)
manager_name = mapped_column(String(30))
__mapper_args__ = {
- 'polymorphic_identity': 'manager',
+ "polymorphic_identity": "manager",
}
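With the mapper-level setting above, a plain query against ``Employee`` (a
sketch) already includes the subclass tables, with no per-query option
required::

    # renders employee LEFT OUTER JOIN engineer LEFT OUTER JOIN manager
    stmt = select(Employee)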
Overall, the LEFT OUTER JOIN format used by :func:`_orm.with_polymorphic` and
>>> employee_plus_engineer = with_polymorphic(Employee, [Engineer])
>>> stmt = (
- ... select(Company.name, employee_plus_engineer.name).
- ... join(Company.employees.of_type(employee_plus_engineer)).
- ... where(
+ ... select(Company.name, employee_plus_engineer.name)
+ ... .join(Company.employees.of_type(employee_plus_engineer))
+ ... .where(
... or_(
... employee_plus_engineer.name == "SpongeBob",
- ... employee_plus_engineer.Engineer.engineer_info == "Senior Customer Engagement Engineer"
+ ... employee_plus_engineer.Engineer.engineer_info
+ ... == "Senior Customer Engagement Engineer",
... )
... )
... )
query could be written strictly in terms of ``Engineer`` targets as follows::
>>> stmt = (
- ... select(Company.name, Engineer.name).
- ... join(Company.employees.of_type(Engineer)).
- ... where(
+ ... select(Company.name, Engineer.name)
+ ... .join(Company.employees.of_type(Engineer))
+ ... .where(
... or_(
... Engineer.name == "SpongeBob",
- ... Engineer.engineer_info == "Senior Customer Engagement Engineer"
+ ... Engineer.engineer_info == "Senior Customer Engagement Engineer",
... )
... )
... )
eagerly load all elements of ``Company.employees`` using the
:func:`_orm.with_polymorphic` construct against the full hierarchy, we may write::
- >>> all_employees = with_polymorphic(Employee, '*')
- >>> stmt = (
- ... select(Company).
- ... options(selectinload(Company.employees.of_type(all_employees)))
- ... )
+ >>> all_employees = with_polymorphic(Employee, "*")
+ >>> stmt = select(Company).options(selectinload(Company.employees.of_type(all_employees)))
>>> for company in session.scalars(stmt):
- ... print(f"company: {company.name}")
- ... print(f"employees: {company.employees}")
+ ... print(f"company: {company.name}")
+ ... print(f"employees: {company.employees}")
{opensql}SELECT company.id, company.name
FROM company
[...] ()
>>> stmt = select(Employee).order_by(Employee.id)
>>> for obj in session.scalars(stmt):
- ... print(f"{obj}")
+ ... print(f"{obj}")
{opensql}BEGIN (implicit)
SELECT employee.id, employee.name, employee.type
FROM employee ORDER BY employee.id
simply includes the additional columns and from a SQL perspective is more
efficient for single-inheritance mappers::
- >>> employees = with_polymorphic(Employee, '*')
+ >>> employees = with_polymorphic(Employee, "*")
>>> stmt = select(employees).order_by(employees.id)
>>> objects = session.scalars(stmt).all()
{opensql}BEGIN (implicit)
>>> class Base(DeclarativeBase):
... pass
- ...
>>> class Employee(Base):
... __tablename__ = "employee"
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str]
... type: Mapped[str]
+ ...
... def __repr__(self):
... return f"{self.__class__.__name__}({self.name!r})"
+ ...
... __mapper_args__ = {
... "polymorphic_identity": "employee",
... "polymorphic_on": "type",
... }
- ...
>>> class Manager(Employee):
... manager_name: Mapped[str] = mapped_column(nullable=True)
... __mapper_args__ = {
... "polymorphic_identity": "manager",
- ... "polymorphic_load": "inline"
+ ... "polymorphic_load": "inline",
... }
- ...
>>> class Engineer(Employee):
... engineer_info: Mapped[str] = mapped_column(nullable=True)
... __mapper_args__ = {
... "polymorphic_identity": "engineer",
- ... "polymorphic_load": "inline"
+ ... "polymorphic_load": "inline",
... }
>>> session.close()
ROLLBACK
- >>> conn.close()
\ No newline at end of file
+ >>> conn.close()
class Base(DeclarativeBase):
pass
+
class Parent(Base):
__tablename__ = "parent"
id: Mapped[int] = mapped_column(primary_key=True)
children: Mapped[list["Child"]] = relationship(lazy="selectin")
+
class Child(Base):
__tablename__ = "child"
from sqlalchemy.orm import joinedload
stmt = select(Parent).options(
- joinedload(Parent.children).
- subqueryload(Child.subelements)
+ joinedload(Parent.children).subqueryload(Child.subelements)
)
Chained loader options can be applied against a "lazy" loaded collection.
from sqlalchemy import select
from sqlalchemy.orm import lazyload
- stmt = select(Parent).options(
- lazyload(Parent.children).
- subqueryload(Child.subelements)
- )
+ stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements))
Above, the query will return ``Parent`` objects without the ``children``
collections loaded. When the ``children`` collection on a particular
from sqlalchemy.orm import lazyload
stmt = (
- select(A).
- options(lazyload(A.bs.and_(B.id > 5))).
- execution_options(populate_existing=True)
+ select(A)
+ .options(lazyload(A.bs.and_(B.id > 5)))
+ .execution_options(populate_existing=True)
)
In order to add filtering criteria to all occurrences of an entity throughout
from sqlalchemy import select
from sqlalchemy.orm import defaultload
- stmt = select(A).options(
- defaultload(A.atob).
- joinedload(B.btoc)
- )
+ stmt = select(A).options(defaultload(A.atob).joinedload(B.btoc))
A similar approach can be used to specify multiple sub-options at once, using
the :meth:`_orm.Load.options` method::
from sqlalchemy.orm import joinedload
stmt = select(A).options(
- defaultload(A.atob).options(
- joinedload(B.btoc),
- joinedload(B.btod)
- )
+ defaultload(A.atob).options(joinedload(B.btoc), joinedload(B.btod))
)
-
.. seealso::
:ref:`orm_queryguide_load_only_related` - illustrates examples of combining
upon collections loaded by that specific object for as long as it exists in
memory. For example, given the previous query::
- stmt = select(Parent).options(
- lazyload(Parent.children).
- subqueryload(Child.subelements)
- )
+ stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements))
if the ``children`` collection on a particular ``Parent`` object loaded by
the above query is expired (such as when a :class:`.Session` object's
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import raiseload
- stmt = select(Order).options(
- joinedload(Order.items), raiseload('*')
- )
+ stmt = select(Order).options(joinedload(Order.items), raiseload("*"))
The above wildcard will apply to **all** relationships: not just those on ``Order``
besides ``items``, but also all those on the ``Item`` objects. To set up
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
- stmt = select(Order).options(
- joinedload(Order.items), Load(Order).raiseload('*')
- )
+ stmt = select(Order).options(joinedload(Order.items), Load(Order).raiseload("*"))
Conversely, to set up the raise for just the ``Item`` objects::
- stmt = select(Order).options(
- joinedload(Order.items).raiseload('*')
- )
-
+ stmt = select(Order).options(joinedload(Order.items).raiseload("*"))
The :func:`.raiseload` option applies only to relationship attributes. For
column-oriented attributes, the :func:`.defer` option supports the
>>> from sqlalchemy import select
>>> from sqlalchemy.orm import joinedload
- >>> stmt = (
- ... select(User).
- ... options(joinedload(User.addresses)).\
- ... filter_by(name='spongebob')
- ... )
+ >>> stmt = select(User).options(joinedload(User.addresses)).filter_by(name="spongebob")
>>> spongebob = session.scalars(stmt).unique().all()
{opensql}SELECT
addresses_1.id AS addresses_1_id,
class Address(Base):
# ...
- user_id: Mapped[int] = mapped_column(ForeignKey('users.id'))
+ user_id: Mapped[int] = mapped_column(ForeignKey("users.id"))
user: Mapped[User] = relationship(lazy="joined", innerjoin=True)
At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag::
from sqlalchemy import select
from sqlalchemy.orm import joinedload
- stmt = select(Address).options(
- joinedload(Address.user, innerjoin=True)
- )
+ stmt = select(Address).options(joinedload(Address.user, innerjoin=True))
The JOIN will right-nest itself when applied in a chain that includes
an OUTER JOIN:
>>> from sqlalchemy import select
>>> from sqlalchemy.orm import joinedload
>>> stmt = select(User).options(
- ... joinedload(User.addresses).
- ... joinedload(Address.widgets, innerjoin=True)
+ ... joinedload(User.addresses).joinedload(Address.widgets, innerjoin=True)
... )
>>> results = session.scalars(stmt).unique().all()
{opensql}SELECT
>>> from sqlalchemy import select
>>> from sqlalchemy.orm import joinedload
>>> stmt = (
- ... select(User).
- ... options(joinedload(User.addresses)).
- ... filter(User.name == 'spongebob').
- ... order_by(Address.email_address)
+ ... select(User)
+ ... .options(joinedload(User.addresses))
+ ... .filter(User.name == "spongebob")
+ ... .order_by(Address.email_address)
... )
>>> result = session.scalars(stmt).unique().all()
{opensql}SELECT
>>> from sqlalchemy import select
>>> stmt = (
- ... select(User).
- ... join(User.addresses).
- ... filter(User.name == 'spongebob').
- ... order_by(Address.email_address)
+ ... select(User)
+ ... .join(User.addresses)
+ ... .filter(User.name == "spongebob")
+ ... .order_by(Address.email_address)
... )
>>> result = session.scalars(stmt).unique().all()
{opensql}
>>> stmt = (
- ... select(User).
- ... join(User.addresses).
- ... options(joinedload(User.addresses)).
- ... filter(User.name == 'spongebob').
- ... order_by(Address.email_address)
+ ... select(User)
+ ... .join(User.addresses)
+ ... .options(joinedload(User.addresses))
+ ... .filter(User.name == "spongebob")
+ ... .order_by(Address.email_address)
... )
>>> result = session.scalars(stmt).unique().all()
{opensql}SELECT
.. sourcecode:: python+sql
>>> stmt = (
- ... select(User).
- ... join(User.addresses).
- ... options(joinedload(User.addresses)).
- ... filter(User.name=='spongebob').
- ... filter(Address.email_address=='someaddress@foo.com')
+ ... select(User)
+ ... .join(User.addresses)
+ ... .options(joinedload(User.addresses))
+ ... .filter(User.name == "spongebob")
+ ... .filter(Address.email_address == "someaddress@foo.com")
... )
>>> result = session.scalars(stmt).unique().all()
{opensql}SELECT
.. sourcecode:: python+sql
>>> stmt = (
- ... select(User).
- ... join(User.addresses).
- ... options(subqueryload(User.addresses)).
- ... filter(User.name=='spongebob').
- ... filter(Address.email_address=='someaddress@foo.com')
+ ... select(User)
+ ... .join(User.addresses)
+ ... .options(subqueryload(User.addresses))
+ ... .filter(User.name == "spongebob")
+ ... .filter(Address.email_address == "someaddress@foo.com")
... )
>>> result = session.scalars(stmt).all()
{opensql}SELECT
>>> from sqlalchemy import select
>>> from sqlalchemy.orm import selectinload
>>> stmt = (
- ... select(User).
- ... options(selectinload(User.addresses)).
- ... filter(or_(User.name == 'spongebob', User.name == 'ed'))
+ ... select(User)
+ ... .options(selectinload(User.addresses))
+ ... .filter(or_(User.name == "spongebob", User.name == "ed"))
... )
>>> result = session.scalars(stmt).all()
{opensql}SELECT
>>> from sqlalchemy import select
>>> from sqlalchemy.orm import subqueryload
- >>> stmt = (
- ... select(User).
- ... options(subqueryload(User.addresses)).
- ... filter_by(name='spongebob')
- ... )
+ >>> stmt = select(User).options(subqueryload(User.addresses)).filter_by(name="spongebob")
>>> results = session.scalars(stmt).all()
{opensql}SELECT
users.id AS users_id,
that the inner query could return the wrong rows::
# incorrect, no ORDER BY
- stmt = select(User).options(
- subqueryload(User.addresses)
- ).limit(1)
+ stmt = select(User).options(subqueryload(User.addresses)).limit(1)
# incorrect if User.name is not unique
- stmt = select(User).options(
- subqueryload(User.addresses)
- ).order_by(User.name).limit(1)
+ stmt = select(User).options(subqueryload(User.addresses)).order_by(User.name).limit(1)
# correct
- stmt = select(User).options(
- subqueryload(User.addresses)
- ).order_by(User.name, User.id).limit(1)
+ stmt = (
+ select(User)
+ .options(subqueryload(User.addresses))
+ .order_by(User.name, User.id)
+ .limit(1)
+ )
.. seealso::
from sqlalchemy import select
from sqlalchemy.orm import lazyload
- stmt = select(MyClass).options(lazyload('*'))
+ stmt = select(MyClass).options(lazyload("*"))
Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting
of all :func:`_orm.relationship` constructs in use for that query,
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import joinedload
- stmt = select(MyClass).options(
- lazyload('*'),
- joinedload(MyClass.widget)
- )
+ stmt = select(MyClass).options(lazyload("*"), joinedload(MyClass.widget))
If multiple ``'*'`` options are passed, the last one overrides
those previously passed.
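For instance, in the following sketch (assuming the same mapped ``MyClass``
as in the example above), ``raiseload("*")`` is passed last, so it is the
wildcard option that takes effect::
from sqlalchemy import select
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import raiseload
# both options use the wildcard; the later raiseload("*") wins
stmt = select(MyClass).options(lazyload("*"), raiseload("*"))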
from sqlalchemy import select
from sqlalchemy.orm import Load
- stmt = select(User, Address).options(
- Load(Address).lazyload('*')
- )
+ stmt = select(User, Address).options(Load(Address).lazyload("*"))
Above, all relationships on ``Address`` will be set to a lazy load.
and additionally establish this as the basis for eager loading of ``User.addresses``::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
addresses = relationship("Address")
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
# ...
- from sqlalchemy.orm import contains_eager
- stmt = (
- select(User).
- join(User.addresses).
- options(contains_eager(User.addresses))
- )
+ from sqlalchemy.orm import contains_eager
+ stmt = select(User).join(User.addresses).options(contains_eager(User.addresses))
If the "eager" portion of the statement is "aliased", the path
should be specified using :meth:`.PropComparator.of_type`, which allows
to be a full path from the starting entity. For example if we were loading
``Users->orders->Order->items->Item``, the option would be used as::
- stmt = select(User).options(
- contains_eager(User.orders).
- contains_eager(Order.items)
- )
+ stmt = select(User).options(contains_eager(User.orders).contains_eager(Order.items))
Using contains_eager() to load a custom-filtered collection result
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
are overwritten::
stmt = (
- select(User).
- join(User.addresses).
- filter(Address.email_address.like('%@aol.com')).
- options(contains_eager(User.addresses)).
- execution_options(populate_existing=True)
+ select(User)
+ .join(User.addresses)
+ .filter(Address.email_address.like("%@aol.com"))
+ .options(contains_eager(User.addresses))
+ .execution_options(populate_existing=True)
)
The above query will load only ``User`` objects which contain at
statement, such as the :meth:`_sql.Select.where` method illustrated below::
>>> from sqlalchemy import select
- >>> stmt = select(User).where(User.name == 'spongebob')
+ >>> stmt = select(User).where(User.name == "spongebob")
Given a completed :class:`_sql.Select` object, in order to execute it within
the ORM to get rows back, the object is passed to
the result rows for a SELECT against ``User`` and ``Address`` will
refer to them under the names ``User`` and ``Address``::
- >>> stmt = (
- ... select(User, Address).
- ... join(User.addresses).
- ... order_by(User.id, Address.id)
- ... )
+ >>> stmt = select(User, Address).join(User.addresses).order_by(User.id, Address.id)
>>> for row in session.execute(stmt):
- ... print(f"{row.User.name} {row.Address.email_address}")
+ ... print(f"{row.User.name} {row.Address.email_address}")
{opensql}SELECT user_account.id, user_account.name, user_account.fullname,
address.id AS id_1, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
>>> user_cls = aliased(User, name="user_cls")
>>> email_cls = aliased(Address, name="email")
>>> stmt = (
- ... select(user_cls, email_cls).
- ... join(user_cls.addresses.of_type(email_cls)).
- ... order_by(user_cls.id, email_cls.id)
+ ... select(user_cls, email_cls)
+ ... .join(user_cls.addresses.of_type(email_cls))
+ ... .order_by(user_cls.id, email_cls.id)
... )
>>> row = session.execute(stmt).first()
{opensql}SELECT user_cls.id, user_cls.name, user_cls.fullname,
above using this form as well::
>>> stmt = (
- ... select(User).
- ... join(User.addresses).
- ... add_columns(Address).
- ... order_by(User.id, Address.id)
+ ... select(User).join(User.addresses).add_columns(Address).order_by(User.id, Address.id)
... )
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname,
as table columns are used::
>>> result = session.execute(
- ... select(User.name, Address.email_address).
- ... join(User.addresses).
- ... order_by(User.id, Address.id)
+ ... select(User.name, Address.email_address)
+ ... .join(User.addresses)
+ ... .order_by(User.id, Address.id)
... )
{opensql}SELECT user_account.name, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
>>> from sqlalchemy.orm import Bundle
>>> stmt = select(
... Bundle("user", User.name, User.fullname),
- ... Bundle("email", Address.email_address)
+ ... Bundle("email", Address.email_address),
... ).join_from(User, Address)
>>> for row in session.execute(stmt):
... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}")
>>> from sqlalchemy import union_all
>>> u = union_all(
- ... select(User).where(User.id < 2),
- ... select(User).where(User.id == 3)
+ ... select(User).where(User.id < 2), select(User).where(User.id == 3)
... ).order_by(User.id)
>>> stmt = select(User).from_statement(u)
>>> for user_obj in session.execute(stmt).scalars():
and order by criteria based on its exported columns::
>>> subq = union_all(
- ... select(User).where(User.id < 2),
- ... select(User).where(User.id == 3)
+ ... select(User).where(User.id < 2), select(User).where(User.id == 3)
... ).subquery()
>>> user_alias = aliased(User, subq)
>>> stmt = select(user_alias).order_by(user_alias.id)
relationship, it results in two separate JOIN elements, for a total of three
JOIN elements in the resulting SQL::
- >>> stmt = (
- ... select(User).
- ... join(User.orders).
- ... join(Order.items)
- ... )
+ >>> stmt = select(User).join(User.orders).join(Order.items)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
other elements to join FROM the ``User`` entity above, for example adding
on the ``User.addresses`` relationship to our chain of joins::
- >>> stmt = (
- ... select(User).
- ... join(User.orders).
- ... join(Order.items).
- ... join(User.addresses)
- ... )
+ >>> stmt = select(User).join(User.orders).join(Order.items).join(User.addresses)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
as the ON clause to be passed explicitly. An example that includes
a SQL expression as the ON clause is as follows::
- >>> stmt = select(User).join(Address, User.id==Address.user_id)
+ >>> stmt = select(User).join(Address, User.id == Address.user_id)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account JOIN address ON user_account.id = address.user_id
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(User.fullname).
- ... join(User.addresses.and_(Address.email_address == 'squirrel@squirrelpower.org'))
+ >>> stmt = select(User.fullname).join(
+ ... User.addresses.and_(Address.email_address == "squirrel@squirrelpower.org")
... )
>>> session.execute(stmt).all()
{opensql}SELECT user_account.fullname
>>> address_alias_1 = aliased(Address)
>>> address_alias_2 = aliased(Address)
>>> stmt = (
- ... select(User).
- ... join(address_alias_1, User.addresses).
- ... where(address_alias_1.email_address == 'patrick@aol.com').
- ... join(address_alias_2, User.addresses).
- ... where(address_alias_2.email_address == 'patrick@gmail.com')
+ ... select(User)
+ ... .join(address_alias_1, User.addresses)
+ ... .where(address_alias_1.email_address == "patrick@aol.com")
+ ... .join(address_alias_2, User.addresses)
+ ... .where(address_alias_2.email_address == "patrick@gmail.com")
... )
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
to produce the same SQL statement as the one just illustrated::
>>> print(
- ... select(User).
- ... join(User.addresses.of_type(address_alias_1)).
- ... where(address_alias_1.email_address == 'patrick@aol.com').
- ... join(User.addresses.of_type(address_alias_2)).
- ... where(address_alias_2.email_address == 'patrick@gmail.com')
- ... )
+ ... select(User)
+ ... .join(User.addresses.of_type(address_alias_1))
+ ... .where(address_alias_1.email_address == "patrick@aol.com")
+ ... .join(User.addresses.of_type(address_alias_2))
+ ... .where(address_alias_2.email_address == "patrick@gmail.com")
+ ... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
JOIN address AS address_1 ON user_account.id = address_1.user_id
construct directly::
>>> user_alias_1 = aliased(User)
- >>> print(
- ... select(user_alias_1.name).
- ... join(user_alias_1.addresses)
- ... )
+ >>> print(select(user_alias_1.name).join(user_alias_1.addresses))
{opensql}SELECT user_account_1.name
FROM user_account AS user_account_1
JOIN address ON user_account_1.id = address.user_id
object using :meth:`_sql.Select.subquery`, which may then be used as the
target of the :meth:`_sql.Select.join` method::
- >>> subq = (
- ... select(Address).
- ... where(Address.email_address == 'pat999@aol.com').
- ... subquery()
- ... )
+ >>> subq = select(Address).where(Address.email_address == "pat999@aol.com").subquery()
>>> stmt = select(User).join(subq, User.id == subq.c.user_id)
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
Given for example a subquery that refers to both ``User`` and ``Address``::
>>> user_address_subq = (
- ... select(User.id, User.name, User.fullname, Address.id, Address.email_address).
- ... join_from(User, Address).
- ... where(Address.email_address.in_(['pat999@aol.com', 'squirrel@squirrelpower.org'])).
- ... subquery()
+ ... select(User.id, User.name, User.fullname, Address.id, Address.email_address)
+ ... .join_from(User, Address)
+ ... .where(Address.email_address.in_(["pat999@aol.com", "squirrel@squirrelpower.org"]))
+ ... .subquery()
... )
We can create :func:`_orm.aliased` constructs against both ``User`` and
subquery once, but in a result-row context can return objects of both
``User`` and ``Address`` classes at the same time::
- >>> stmt = select(user_alias, address_alias).where(user_alias.name == 'sandy')
+ >>> stmt = select(user_alias, address_alias).where(user_alias.name == "sandy")
>>> for row in session.execute(stmt):
... print(f"{row.user} {row.address}")
{opensql}SELECT anon_1.id, anon_1.name, anon_1.fullname, anon_1.id_1, anon_1.email_address
:class:`_sql.Select` is not in line with what we want to join from,
the :meth:`_sql.Select.join_from` method may be used::
- >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == 'sandy')
+ >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
in the form ``(<join from>, <onclause>)``, or ``(<join from>, <join to>,
[<onclause>])``::
- >>> stmt = select(Address).join_from(User, Address).where(User.name == 'sandy')
+ >>> stmt = select(Address).join_from(User, Address).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
be used::
- >>> stmt = select(Address).select_from(User).join(Address).where(User.name == 'sandy')
+ >>> stmt = select(Address).select_from(User).join(Address).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
such a :class:`_sql.Join` object. Therefore we can see the contents
of :meth:`_sql.Select.select_from` being overridden in a case like this::
- >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == 'sandy')
+ >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == "sandy")
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
FROM address JOIN user_account ON user_account.id = address.user_id
>>>
>>> j = address_table.join(user_table, user_table.c.id == address_table.c.user_id)
>>> stmt = (
- ... select(address_table).select_from(user_table).select_from(j).
- ... where(user_table.c.name == 'sandy')
+ ... select(address_table)
+ ... .select_from(user_table)
+ ... .select_from(j)
+ ... .where(user_table.c.name == "sandy")
... )
>>> print(stmt)
SELECT address.id, address.user_id, address.email_address
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(User.fullname).
- ... where(User.addresses.any(Address.email_address == 'squirrel@squirrelpower.org'))
+ >>> stmt = select(User.fullname).where(
+ ... User.addresses.any(Address.email_address == "squirrel@squirrelpower.org")
... )
>>> session.execute(stmt).all()
{opensql}SELECT user_account.fullname
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(User.fullname).
- ... where(~User.addresses.any())
- ... )
+ >>> stmt = select(User.fullname).where(~User.addresses.any())
>>> session.execute(stmt).all()
{opensql}SELECT user_account.fullname
FROM user_account
.. sourcecode:: pycon+sql
- >>> stmt = (
- ... select(Address.email_address).
- ... where(Address.user.has(User.name=="sandy"))
- ... )
+ >>> stmt = select(Address.email_address).where(Address.user.has(User.name == "sandy"))
>>> session.execute(stmt).all()
{opensql}SELECT address.email_address
FROM address
>>> class User(Base):
... __tablename__ = "user_account"
- ...
+ ...
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str] = mapped_column(String(30))
... fullname: Mapped[Optional[str]]
- ...
+ ...
... addresses: Mapped[list["Address"]] = relationship(
... back_populates="user", cascade="all, delete-orphan"
... )
- ...
+ ...
... def __repr__(self) -> str:
... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
>>> class Address(Base):
... __tablename__ = "address"
- ...
+ ...
... id: Mapped[int] = mapped_column(primary_key=True)
... email_address: Mapped[str]
... user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id"))
- ...
+ ...
... user: Mapped["User"] = relationship(back_populates="addresses")
- ...
+ ...
... def __repr__(self) -> str:
... return f"Address(id={self.id!r}, email_address={self.email_address!r})"
>>> from sqlalchemy.orm import Session
>>> with Session(engine) as session:
- ...
+ ...
... spongebob = User(
... name="spongebob",
... fullname="Spongebob Squarepants",
... ],
... )
... patrick = User(name="patrick", fullname="Patrick Star")
- ...
+ ...
... session.add_all([spongebob, sandy, patrick])
- ...
+ ...
... session.commit()
{opensql}BEGIN (implicit)
INSERT INTO user_account (name, fullname) VALUES (?, ?), (?, ?), (?, ?) RETURNING id
.. sourcecode:: pycon+sql
>>> stmt = (
- ... select(Address)
- ... .join(Address.user)
- ... .where(User.name == "sandy")
- ... .where(Address.email_address == "sandy@sqlalchemy.org")
+ ... select(Address)
+ ... .join(Address.user)
+ ... .where(User.name == "sandy")
+ ... .where(Address.email_address == "sandy@sqlalchemy.org")
... )
>>> sandy_address = session.scalars(stmt).one()
{opensql}SELECT address.id, address.email_address, address.user_id
[...] ('patrick',)
{stop}
- >>> patrick.addresses.append(
- ... Address(email_address="patrickstar@sqlalchemy.org")
- ... )
+ >>> patrick.addresses.append(Address(email_address="patrickstar@sqlalchemy.org"))
{opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id
FROM address
WHERE ? = address.user_id
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Entry(Base):
- __tablename__ = 'entry'
+ __tablename__ = "entry"
entry_id = mapped_column(Integer, primary_key=True)
- widget_id = mapped_column(Integer, ForeignKey('widget.widget_id'))
+ widget_id = mapped_column(Integer, ForeignKey("widget.widget_id"))
name = mapped_column(String(50))
+
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
widget_id = mapped_column(Integer, primary_key=True)
- favorite_entry_id = mapped_column(Integer,
- ForeignKey('entry.entry_id',
- name="fk_favorite_entry"))
+ favorite_entry_id = mapped_column(
+ Integer, ForeignKey("entry.entry_id", name="fk_favorite_entry")
+ )
name = mapped_column(String(50))
- entries = relationship(Entry, primaryjoin=
- widget_id==Entry.widget_id)
- favorite_entry = relationship(Entry,
- primaryjoin=
- favorite_entry_id==Entry.entry_id,
- post_update=True)
+ entries = relationship(Entry, primaryjoin=widget_id == Entry.widget_id)
+ favorite_entry = relationship(
+ Entry, primaryjoin=favorite_entry_id == Entry.entry_id, post_update=True
+ )
When a structure against the above configuration is flushed, the "widget" row will be
INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will
.. sourcecode:: pycon+sql
- >>> w1 = Widget(name='somewidget')
- >>> e1 = Entry(name='someentry')
+ >>> w1 = Widget(name="somewidget")
+ >>> e1 = Entry(name="someentry")
>>> w1.favorite_entry = e1
>>> w1.entries = [e1]
>>> session.add_all([w1, e1])
that also refers to this ``Widget``. We can use a composite foreign key,
as illustrated below::
- from sqlalchemy import Integer, ForeignKey, String, \
- UniqueConstraint, ForeignKeyConstraint
+ from sqlalchemy import (
+ Integer,
+ ForeignKey,
+ String,
+ UniqueConstraint,
+ ForeignKeyConstraint,
+ )
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
+
class Base(DeclarativeBase):
pass
+
class Entry(Base):
- __tablename__ = 'entry'
+ __tablename__ = "entry"
entry_id = mapped_column(Integer, primary_key=True)
- widget_id = mapped_column(Integer, ForeignKey('widget.widget_id'))
+ widget_id = mapped_column(Integer, ForeignKey("widget.widget_id"))
name = mapped_column(String(50))
- __table_args__ = (
- UniqueConstraint("entry_id", "widget_id"),
- )
+ __table_args__ = (UniqueConstraint("entry_id", "widget_id"),)
+
class Widget(Base):
- __tablename__ = 'widget'
+ __tablename__ = "widget"
- widget_id = mapped_column(Integer, autoincrement='ignore_fk', primary_key=True)
+ widget_id = mapped_column(Integer, autoincrement="ignore_fk", primary_key=True)
favorite_entry_id = mapped_column(Integer)
name = mapped_column(String(50))
ForeignKeyConstraint(
["widget_id", "favorite_entry_id"],
["entry.widget_id", "entry.entry_id"],
- name="fk_favorite_entry"
+ name="fk_favorite_entry",
),
)
- entries = relationship(Entry, primaryjoin=
- widget_id==Entry.widget_id,
- foreign_keys=Entry.widget_id)
- favorite_entry = relationship(Entry,
- primaryjoin=
- favorite_entry_id==Entry.entry_id,
- foreign_keys=favorite_entry_id,
- post_update=True)
+ entries = relationship(
+ Entry, primaryjoin=widget_id == Entry.widget_id, foreign_keys=Entry.widget_id
+ )
+ favorite_entry = relationship(
+ Entry,
+ primaryjoin=favorite_entry_id == Entry.entry_id,
+ foreign_keys=favorite_entry_id,
+ post_update=True,
+ )
The above mapping features a composite :class:`_schema.ForeignKeyConstraint`
bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure
illustrates this is::
class User(Base):
- __tablename__ = 'user'
- __table_args__ = {'mysql_engine': 'InnoDB'}
+ __tablename__ = "user"
+ __table_args__ = {"mysql_engine": "InnoDB"}
username = mapped_column(String(50), primary_key=True)
fullname = mapped_column(String(100))
class Address(Base):
- __tablename__ = 'address'
- __table_args__ = {'mysql_engine': 'InnoDB'}
+ __tablename__ = "address"
+ __table_args__ = {"mysql_engine": "InnoDB"}
email = mapped_column(String(50), primary_key=True)
- username = mapped_column(String(50),
- ForeignKey('user.username', onupdate="cascade")
- )
+ username = mapped_column(
+ String(50), ForeignKey("user.username", onupdate="cascade")
+ )
Above, we illustrate ``onupdate="cascade"`` on the :class:`_schema.ForeignKey`
object, and we also illustrate the ``mysql_engine='InnoDB'`` setting
Our previous mapping using ``passive_updates=False`` looks like::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
username = mapped_column(String(50), primary_key=True)
fullname = mapped_column(String(100))
# does not implement ON UPDATE CASCADE
addresses = relationship("Address", passive_updates=False)
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
email = mapped_column(String(50), primary_key=True)
- username = mapped_column(String(50), ForeignKey('user.username'))
+ username = mapped_column(String(50), ForeignKey("user.username"))
Key limitations of ``passive_updates=False`` include:
class called ``Node``, representing a tree structure::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = mapped_column(Integer, primary_key=True)
- parent_id = mapped_column(Integer, ForeignKey('node.id'))
+ parent_id = mapped_column(Integer, ForeignKey("node.id"))
data = mapped_column(String(50))
children = relationship("Node")
that indicate those which should be considered to be "remote"::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = mapped_column(Integer, primary_key=True)
- parent_id = mapped_column(Integer, ForeignKey('node.id'))
+ parent_id = mapped_column(Integer, ForeignKey("node.id"))
data = mapped_column(String(50))
parent = relationship("Node", remote_side=[id])
relationship using the :func:`.backref` function::
class Node(Base):
- __tablename__ = 'node'
+ __tablename__ = "node"
id = mapped_column(Integer, primary_key=True)
- parent_id = mapped_column(Integer, ForeignKey('node.id'))
+ parent_id = mapped_column(Integer, ForeignKey("node.id"))
data = mapped_column(String(50))
- children = relationship("Node",
- backref=backref('parent', remote_side=[id])
- )
+ children = relationship("Node", backref=backref("parent", remote_side=[id]))
.. seealso::
to a specific folder within that account::
class Folder(Base):
- __tablename__ = 'folder'
+ __tablename__ = "folder"
__table_args__ = (
- ForeignKeyConstraint(
- ['account_id', 'parent_id'],
- ['folder.account_id', 'folder.folder_id']),
+ ForeignKeyConstraint(
+ ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"]
+ ),
)
account_id = mapped_column(Integer, primary_key=True)
parent_id = mapped_column(Integer)
name = mapped_column(String)
- parent_folder = relationship("Folder",
- backref="child_folders",
- remote_side=[account_id, folder_id]
- )
+ parent_folder = relationship(
+ "Folder", backref="child_folders", remote_side=[account_id, folder_id]
+ )
Above, we pass ``account_id`` into the :paramref:`_orm.relationship.remote_side` list.
:func:`_orm.relationship` recognizes that the ``account_id`` column here
Querying of self-referential structures works like any other query::
# get all nodes named 'child2'
- session.scalars(select(Node).where(Node.data=='child2'))
+ session.scalars(select(Node).where(Node.data == "child2"))
However extra care is needed when attempting to join along
the foreign key from one level of the tree to the next. In SQL,
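joining a table to itself requires that at least one side of the join be
aliased. With the ORM, a sketch of such a query (illustrative only, reusing
the ``Node`` mapping above) can use :func:`_orm.aliased` together with
:meth:`.PropComparator.of_type`::
from sqlalchemy import select
from sqlalchemy.orm import aliased
child_alias = aliased(Node)
# nodes that have an immediate child named 'child2'
stmt = (
select(Node)
.join(Node.children.of_type(child_alias))
.where(child_alias.data == "child2")
)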
session_events
session_api
-
# an Engine, which the Session will use for connection
# resources
- engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/')
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/")
# create session and add objects
with Session(engine) as session:
# create session and add objects
with Session(engine) as session:
with session.begin():
- session.add(some_object)
- session.add(some_other_object)
+ session.add(some_object)
+ session.add(some_other_object)
# inner context calls session.commit(), if there were no exceptions
# outer context calls session.close()
# an Engine, which the Session will use for connection
# resources, typically in module scope
- engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/')
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/")
# a sessionmaker(), also in the same scope as the engine
Session = sessionmaker(engine)
# an Engine, which the Session will use for connection
# resources
- engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/')
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/")
# a sessionmaker(), also in the same scope as the engine
Session = sessionmaker(engine)
# list of ``User`` objects
user_obj = session.scalars(statement).all()
-
# query for individual columns
statement = select(User.name, User.fullname)
(i.e. have been removed from a session) may be re-associated with a session
using this method::
- user1 = User(name='user1')
- user2 = User(name='user2')
+ user1 = User(name="user1")
+ user2 = User(name="user2")
session.add(user1)
session.add(user2)
- session.commit() # write changes to the database
+ session.commit() # write changes to the database
To add a list of items to the session at once, use
:meth:`~.Session.add_all`::
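# illustrative sketch, reusing ``user1`` and ``user2`` from above
session.add_all([user1, user2])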
re-populated from their contents in the database::
u2 = session.scalars(
- select(User)
- .where(User.id == 5)
- .execution_options(populate_existing=True)
+ select(User).where(User.id == 5).execution_options(populate_existing=True)
).one()
..
### this is the **wrong way to do it** ###
+
class ThingOne:
def go(self):
session = Session()
session.rollback()
raise
+
class ThingTwo:
def go(self):
session = Session()
session.rollback()
raise
+
def run_my_program():
ThingOne().go()
ThingTwo().go()
### this is a **better** (but not the only) way to do it ###
+
class ThingOne:
def go(self, session):
session.execute(update(FooBar).values(x=5))
+
class ThingTwo:
def go(self, session):
session.execute(update(Widget).values(q=18))
+
def run_my_program():
with Session() as session:
with session.begin():
ThingOne().go(session)
ThingTwo().go(session)
-
.. versionchanged:: 1.4 The :class:`_orm.Session` may be used as a context
manager without the use of external helper functions.
The newer :ref:`core_inspection_toplevel` system can also be used::
from sqlalchemy import inspect
+
session = inspect(someobject).session
.. _session_faq_threadsafe:
Session = sessionmaker(engine, future=True)
+
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if orm_execute_state.is_select:
# ORDER BY if so
col_descriptions = orm_execute_state.statement.column_descriptions
- if col_descriptions[0]['entity'] is MyEntity:
+ if col_descriptions[0]["entity"] is MyEntity:
orm_execute_state.statement = orm_execute_state.statement.order_by(MyEntity.name)
The above example illustrates some simple modifications to SELECT statements.
Session = sessionmaker(engine, future=True)
+
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if (
- orm_execute_state.is_select and
- not orm_execute_state.is_column_load and
- not orm_execute_state.is_relationship_load
+ orm_execute_state.is_select
+ and not orm_execute_state.is_column_load
+ and not orm_execute_state.is_relationship_load
):
orm_execute_state.statement = orm_execute_state.statement.options(
with_loader_criteria(MyEntity, MyEntity.public == True)
import datetime
+
class HasTimestamp:
timestamp = mapped_column(DateTime, default=datetime.datetime.now)
__tablename__ = "some_entity"
id = mapped_column(Integer, primary_key=True)
+
class SomeOtherEntity(HasTimestamp, Base):
__tablename__ = "some_entity"
id = mapped_column(Integer, primary_key=True)
-
The above classes ``SomeEntity`` and ``SomeOtherEntity`` will each have a column
``timestamp`` that defaults to the current date and time. An event may be used
to intercept all objects that extend from ``HasTimestamp`` and filter their
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if (
- orm_execute_state.is_select
- and not orm_execute_state.is_column_load
- and not orm_execute_state.is_relationship_load
+ orm_execute_state.is_select
+ and not orm_execute_state.is_column_load
+ and not orm_execute_state.is_relationship_load
):
# note: timedelta() has no "months" argument; approximate one month as 30 days
one_month_ago = datetime.datetime.today() - datetime.timedelta(days=30)
with_loader_criteria(
HasTimestamp,
lambda cls: cls.timestamp >= one_month_ago,
- include_aliases=True
+ include_aliases=True,
)
)
cache = {}
+
@event.listens_for(Session, "do_orm_execute")
def _do_orm_execute(orm_execute_state):
if "my_cache_key" in orm_execute_state.execution_options:
With the above hook in place, an example of using the cache would look like::
- stmt = select(User).where(User.name == 'sandy').execution_options(my_cache_key="key_sandy")
+ stmt = (
+ select(User).where(User.name == "sandy").execution_options(my_cache_key="key_sandy")
+ )
result = session.execute(stmt)
session = Session()
- @event.listens_for(session, 'transient_to_pending')
+
+ @event.listens_for(session, "transient_to_pending")
def object_is_pending(session, obj):
print("new pending: %s" % obj)
maker = sessionmaker()
- @event.listens_for(maker, 'transient_to_pending')
+
+ @event.listens_for(maker, "transient_to_pending")
def object_is_pending(session, obj):
print("new pending: %s" % obj)
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy import event
+
class Base(DeclarativeBase):
pass
+
@event.listens_for(Base, "init", propagate=True)
def intercept_init(instance, args, kwargs):
print("new transient: %s" % instance)
-
Transient to Pending
^^^^^^^^^^^^^^^^^^^^
def intercept_transient_to_pending(session, object_):
print("transient to pending: %s" % object_)
-
Pending to Persistent
^^^^^^^^^^^^^^^^^^^^^
def intercept_loaded_as_persistent(session, object_):
print("object loaded into persistent state: %s" % object_)
-
Persistent to Transient
^^^^^^^^^^^^^^^^^^^^^^^
def intercept_persistent_to_deleted(session, object_):
print("object was DELETEd, is now in deleted state: %s" % object_)
-
Deleted to Detached
^^^^^^^^^^^^^^^^^^^
def intercept_deleted_to_detached(session, object_):
print("deleted to detached: %s" % object_)
-
.. note::
While the object is in the deleted state, the :attr:`.InstanceState.deleted`
def intercept_detached_to_persistent(session, object_):
print("object became persistent again: %s" % object_)
-
Deleted to Persistent
^^^^^^^^^^^^^^^^^^^^^
from sqlalchemy import event
+
def strong_reference_session(session):
@event.listens_for(session, "pending_to_persistent")
@event.listens_for(session, "deleted_to_persistent")
@event.listens_for(session, "detached_to_persistent")
@event.listens_for(session, "loaded_as_persistent")
def strong_ref_object(sess, instance):
- if 'refs' not in sess.info:
- sess.info['refs'] = refs = set()
+ if "refs" not in sess.info:
+ sess.info["refs"] = refs = set()
else:
- refs = sess.info['refs']
+ refs = sess.info["refs"]
refs.add(instance)
-
@event.listens_for(session, "persistent_to_detached")
@event.listens_for(session, "persistent_to_deleted")
@event.listens_for(session, "persistent_to_transient")
def deref_object(sess, instance):
- sess.info['refs'].discard(instance)
+ sess.info["refs"].discard(instance)
Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`,
:meth:`.SessionEvents.detached_to_persistent`,
maker = sessionmaker()
strong_reference_session(maker)
-
.. _unitofwork_merging:
Merging
Let's use the canonical example of the ``User`` and ``Address`` objects::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(50), nullable=False)
addresses = relationship("Address", backref="user")
+
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
id = mapped_column(Integer, primary_key=True)
email_address = mapped_column(String(50), nullable=False)
- user_id = mapped_column(Integer, ForeignKey('user.id'), nullable=False)
+ user_id = mapped_column(Integer, ForeignKey("user.id"), nullable=False)
Assume a ``User`` object with one ``Address``, already persistent::
- >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')])
+ >>> u1 = User(name="ed", addresses=[Address(email_address="ed@ed.com")])
>>> session.add(u1)
>>> session.commit()
that is in the :term:`persistent` state. For example, if we load an object
as follows::
- user = session.scalars(select(User).filter_by(name='user1').limit(1)).first()
+ user = session.scalars(select(User).filter_by(name="user1").limit(1)).first()
The above ``User`` object is persistent, and has a series of attributes
present; if we were to look inside its ``__dict__``, we'd see that state
is that all un-flushed changes on an object are discarded. That is,
if we were to modify an attribute on our ``User``::
- >>> user.name = 'user2'
+ >>> user.name = "user2"
but then we call :meth:`~.Session.expire` without first calling :meth:`~.Session.flush`,
our pending value of ``'user2'`` is discarded::
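>>> session.expire(user)
>>> user.name  # a sketch: the next access re-fetches the row
'user1'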
attributes to be marked as expired::
# expire only attributes obj1.attr1, obj1.attr2
- session.expire(obj1, ['attr1', 'attr2'])
+ session.expire(obj1, ["attr1", "attr2"])
The :meth:`.Session.expire_all` method allows us to essentially call
:meth:`.Session.expire` on all objects contained within the :class:`.Session`
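at once, for example::
session.expire_all()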
be that of a column-mapped attribute::
# reload obj1.attr1, obj1.attr2
- session.refresh(obj1, ['attr1', 'attr2'])
+ session.refresh(obj1, ["attr1", "attr2"])
.. tip::
Below, assume we start with a :class:`_orm.Session`::
from sqlalchemy.orm import Session
+
session = Session(engine)
We can now run operations within a demarcated transaction using a context
session.add(u1)
session.add(u2)
- nested = session.begin_nested() # establish a savepoint
+ nested = session.begin_nested() # establish a savepoint
session.add(u3)
nested.rollback() # rolls back u3, keeps u1 and u2
for record in records:
try:
with session.begin_nested():
- session.merge(record)
+ session.merge(record)
except:
- print("Skipped record %s" % record)
+ print("Skipped record %s" % record)
session.commit()
When the context manager yielded by :meth:`_orm.Session.begin_nested`
[
{"data": "some data one"},
{"data": "some data two"},
- {"data": "some data three"}
- ]
+ {"data": "some data three"},
+ ],
)
conn.commit()
Session = sessionmaker(engine, future=True)
with Session() as session:
- session.add_all([
- SomeClass(data="some data one"),
- SomeClass(data="some data two"),
- SomeClass(data="some data three")
- ])
+ session.add_all(
+ [
+ SomeClass(data="some data one"),
+ SomeClass(data="some data two"),
+ SomeClass(data="some data three"),
+ ]
+ )
session.commit()
Begin Once
[
{"data": "some data one"},
{"data": "some data two"},
- {"data": "some data three"}
- ]
+ {"data": "some data three"},
+ ],
)
# commits and closes automatically
Session = sessionmaker(engine, future=True)
with Session.begin() as session:
- session.add_all([
- SomeClass(data="some data one"),
- SomeClass(data="some data two"),
- SomeClass(data="some data three")
- ])
+ session.add_all(
+ [
+ SomeClass(data="some data one"),
+ SomeClass(data="some data two"),
+ SomeClass(data="some data three"),
+ ]
+ )
# commits and closes automatically
-
Nested Transaction
~~~~~~~~~~~~~~~~~~~~
[
{"data": "some data one"},
{"data": "some data two"},
- {"data": "some data three"}
- ]
+ {"data": "some data three"},
+ ],
)
savepoint.commit() # or rollback
with Session.begin() as session:
savepoint = session.begin_nested()
- session.add_all([
- SomeClass(data="some data one"),
- SomeClass(data="some data two"),
- SomeClass(data="some data three")
- ])
+ session.add_all(
+ [
+ SomeClass(data="some data one"),
+ SomeClass(data="some data two"),
+ SomeClass(data="some data three"),
+ ]
+ )
savepoint.commit() # or rollback
# commits automatically
-
-
-
.. _session_explicit_begin:
Explicit Begin
try:
item1 = session.get(Item, 1)
item2 = session.get(Item, 2)
- item1.foo = 'bar'
- item2.bar = 'foo'
+ item1.foo = "bar"
+ item2.bar = "foo"
session.commit()
except:
session.rollback()
with session.begin():
item1 = session.get(Item, 1)
item2 = session.get(Item, 2)
- item1.foo = 'bar'
- item2.bar = 'foo'
+ item1.foo = "bar"
+ item2.bar = "foo"
The :meth:`_orm.Session.begin` method and the session's "autobegin" process
use the same sequence of steps to begin the transaction. This includes
interacting with transactions not managed by SQLAlchemy. To use two-phase
transactions, set the flag ``twophase=True`` on the session::
- engine1 = create_engine('postgresql+psycopg2://db1')
- engine2 = create_engine('postgresql+psycopg2://db2')
+ engine1 = create_engine("postgresql+psycopg2://db1")
+ engine2 = create_engine("postgresql+psycopg2://db2")
Session = sessionmaker(twophase=True)
# bind User operations to engine 1, Account operations to engine 2
- Session.configure(binds={User:engine1, Account:engine2})
+ Session.configure(binds={User: engine1, Account: engine2})
session = Session()
# before committing both transactions
session.commit()
-
.. _session_transaction_isolation:
Setting Transaction Isolation Levels / DBAPI AUTOCOMMIT
eng = create_engine(
"postgresql+psycopg2://scott:tiger@localhost/test",
- isolation_level='REPEATABLE READ'
+ isolation_level="REPEATABLE READ",
)
Session = sessionmaker(eng)
-
Another option, useful if there are to be two engines with different isolation
levels at once, is to use the :meth:`_engine.Engine.execution_options` method,
which will produce a shallow copy of the original :class:`_engine.Engine` which
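shares the same connection pool as the parent engine::
# a sketch: produce the autocommit copy referenced below
autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT")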
transactional_session = sessionmaker(eng)
autocommit_session = sessionmaker(autocommit_engine)
-
Above, both "``eng``" and ``"autocommit_engine"`` share the same dialect and
connection pool. However the "AUTOCOMMIT" mode will be set upon connections
when they are acquired from the ``autocommit_engine``. The two
with Session() as session:
session.bind_mapper(User, autocommit_engine)
-
Setting Isolation for Individual Transactions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# call connection() with options before any other operations proceed.
# this will procure a new connection from the bound engine and begin a real
# database transaction.
- sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'})
+ sess.connection(execution_options={"isolation_level": "SERIALIZABLE"})
# ... work with session in SERIALIZABLE isolation level...
# call connection() with options before any other operations proceed.
# this will procure a new connection from the bound engine and begin a
# real database transaction.
- sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'})
+ sess.connection(execution_options={"isolation_level": "SERIALIZABLE"})
# ... work with session in SERIALIZABLE isolation level...
# outside the block, the transaction has been committed. the connection is
# released and reverted to its previous isolation level.
-
-
Tracking Transaction State with Events
--------------------------------------
# global application scope. create Session class, engine
Session = sessionmaker()
- engine = create_engine('postgresql+psycopg2://...')
+ engine = create_engine("postgresql+psycopg2://...")
+
class SomeTest(TestCase):
def setUp(self):
# begin a non-ORM transaction
self.trans = self.connection.begin()
-
# bind an individual Session to the connection
self.session = Session(bind=self.connection)
-
### optional ###
# if the database supports SAVEPOINT (SQLite needs special
mapper options::
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
version_id = mapped_column(Integer, nullable=False)
name = mapped_column(String(50), nullable=False)
- __mapper_args__ = {
- "version_id_col": version_id
- }
+ __mapper_args__ = {"version_id_col": version_id}
.. note:: It is **strongly recommended** that the ``version_id`` column
be made NOT NULL. The versioning feature **does not support** a NULL
import uuid
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
version_uuid = mapped_column(String(32), nullable=False)
name = mapped_column(String(50), nullable=False)
__mapper_args__ = {
- 'version_id_col':version_uuid,
- 'version_id_generator':lambda version: uuid.uuid4().hex
+ "version_id_col": version_uuid,
+ "version_id_generator": lambda version: uuid.uuid4().hex,
}
The persistence engine will call upon ``uuid.uuid4()`` each time a
from sqlalchemy import FetchedValue
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String(50), nullable=False)
xmin = mapped_column("xmin", String, system=True, server_default=FetchedValue())
- __mapper_args__ = {
- 'version_id_col': xmin,
- 'version_id_generator': False
- }
+ __mapper_args__ = {"version_id_col": xmin, "version_id_generator": False}
With the above mapping, the ORM will rely upon the ``xmin`` column for
automatically providing the new value of the version id counter.
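A brief usage sketch (PostgreSQL-specific, since ``xmin`` is a PostgreSQL
system column; illustrative only, reusing the mapping above)::
u1 = User(name="u1")
session.add(u1)
session.commit()  # the server generates xmin; the ORM fetches it after flush
u1.name = "u2"
session.commit()  # the UPDATE includes the fetched xmin in its WHERE clause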
import uuid
+
class User(Base):
- __tablename__ = 'user'
+ __tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
version_uuid = mapped_column(String(32), nullable=False)
name = mapped_column(String(50), nullable=False)
- __mapper_args__ = {
- 'version_id_col':version_uuid,
- 'version_id_generator': False
- }
+ __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False}
+
- u1 = User(name='u1', version_uuid=uuid.uuid4())
+ u1 = User(name="u1", version_uuid=uuid.uuid4())
session.add(u1)
session.commit()
- u1.name = 'u2'
+ u1.name = "u2"
u1.version_uuid = uuid.uuid4()
session.commit()
issues::
# will leave version_uuid unchanged
- u1.name = 'u3'
+ u1.name = "u3"
session.commit()
.. versionadded:: 0.9.0
and the VALUES clause at once::
>>> from sqlalchemy import insert
- >>> stmt = insert(user_table).values(name='spongebob', fullname="Spongebob Squarepants")
+ >>> stmt = insert(user_table).values(name="spongebob", fullname="Spongebob Squarepants")
The above ``stmt`` variable is an instance of :class:`_sql.Insert`. Most
SQL expressions can be stringified in place as a means to see the general
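form of what's being produced::
>>> print(stmt)
{opensql}INSERT INTO user_account (name, fullname) VALUES (:name, :fullname)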
... insert(user_table),
... [
... {"name": "sandy", "fullname": "Sandy Cheeks"},
- ... {"name": "patrick", "fullname": "Patrick Star"}
- ... ]
+ ... {"name": "patrick", "fullname": "Patrick Star"},
+ ... ],
... )
... conn.commit()
{opensql}BEGIN (implicit)
>>> from sqlalchemy import select, bindparam
>>> scalar_subq = (
- ... select(user_table.c.id).
- ... where(user_table.c.name==bindparam('username')).
- ... scalar_subquery()
+ ... select(user_table.c.id)
+ ... .where(user_table.c.name == bindparam("username"))
+ ... .scalar_subquery()
... )
>>> with engine.connect() as conn:
... result = conn.execute(
... insert(address_table).values(user_id=scalar_subq),
... [
- ... {"username": 'spongebob', "email_address": "spongebob@sqlalchemy.org"},
- ... {"username": 'sandy', "email_address": "sandy@sqlalchemy.org"},
- ... {"username": 'sandy', "email_address": "sandy@squirrelpower.org"},
- ... ]
+ ... {
+ ... "username": "spongebob",
+ ... "email_address": "spongebob@sqlalchemy.org",
+ ... },
+ ... {"username": "sandy", "email_address": "sandy@sqlalchemy.org"},
+ ... {"username": "sandy", "email_address": "sandy@squirrelpower.org"},
+ ... ],
... )
... conn.commit()
{opensql}BEGIN (implicit)
object that's returned when the statement is executed has rows which
can be fetched::
- >>> insert_stmt = insert(address_table).returning(address_table.c.id, address_table.c.email_address)
+ >>> insert_stmt = insert(address_table).returning(
+ ... address_table.c.id, address_table.c.email_address
+ ... )
>>> print(insert_stmt)
{opensql}INSERT INTO address (id, user_id, email_address)
VALUES (:id, :user_id, :email_address)
it can be stringified in place::
>>> from sqlalchemy import select
- >>> stmt = select(user_table).where(user_table.c.name == 'spongebob')
+ >>> stmt = select(user_table).where(user_table.c.name == "spongebob")
>>> print(stmt)
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
.. sourcecode:: pycon+sql
- >>> stmt = select(User).where(User.name == 'spongebob')
+ >>> stmt = select(User).where(User.name == "spongebob")
>>> with Session(engine) as session:
... for row in session.execute(stmt):
... print(row)
:class:`.FromClause` such as :class:`.Table`, multiple columns may be specified
for a :func:`_sql.select` by using a tuple of string names::
- >>> print(select(user_table.c['name', 'fullname']))
+ >>> print(select(user_table.c["name", "fullname"]))
{opensql}SELECT user_account.name, user_account.fullname
FROM user_account
it with full ``Address`` entities in the second element::
>>> session.execute(
- ... select(User.name, Address).
- ... where(User.id==Address.user_id).
- ... order_by(Address.id)
+ ... select(User.name, Address).where(User.id == Address.user_id).order_by(Address.id)
... ).all()
{opensql}SELECT user_account.name, address.id, address.email_address, address.user_id
FROM user_account, address
.. sourcecode:: pycon+sql
>>> from sqlalchemy import func, cast
- >>> stmt = (
- ... select(
- ... ("Username: " + user_table.c.name).label("username"),
- ... ).order_by(user_table.c.name)
- ... )
+ >>> stmt = select(
+ ... ("Username: " + user_table.c.name).label("username"),
+ ... ).order_by(user_table.c.name)
>>> with engine.connect() as conn:
... for row in conn.execute(stmt):
... print(f"{row.username}")
SELECT statement::
>>> from sqlalchemy import text
- >>> stmt = (
- ... select(
- ... text("'some phrase'"), user_table.c.name
- ... ).order_by(user_table.c.name)
- ... )
+ >>> stmt = select(text("'some phrase'"), user_table.c.name).order_by(user_table.c.name)
>>> with engine.connect() as conn:
... print(conn.execute(stmt).all())
{opensql}BEGIN (implicit)
>>> from sqlalchemy import literal_column
- >>> stmt = (
- ... select(
- ... literal_column("'some phrase'").label("p"), user_table.c.name
- ... ).order_by(user_table.c.name)
+ >>> stmt = select(literal_column("'some phrase'").label("p"), user_table.c.name).order_by(
+ ... user_table.c.name
... )
>>> with engine.connect() as conn:
... for row in conn.execute(stmt):
Python operators such as ``==``, ``!=``, ``<``, ``>=`` etc. generate new
SQL Expression objects, rather than plain boolean ``True``/``False`` values::
- >>> print(user_table.c.name == 'squidward')
+ >>> print(user_table.c.name == "squidward")
user_account.name = :name_1
>>> print(address_table.c.user_id > 10)
We can use expressions like these to generate the WHERE clause by passing
the resulting objects to the :meth:`_sql.Select.where` method::
- >>> print(select(user_table).where(user_table.c.name == 'squidward'))
+ >>> print(select(user_table).where(user_table.c.name == "squidward"))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
WHERE user_account.name = :name_1
method may be invoked any number of times::
>>> print(
- ... select(address_table.c.email_address).
- ... where(user_table.c.name == 'squidward').
- ... where(address_table.c.user_id == user_table.c.id)
+ ... select(address_table.c.email_address)
+ ... .where(user_table.c.name == "squidward")
+ ... .where(address_table.c.user_id == user_table.c.id)
... )
{opensql}SELECT address.email_address
FROM address, user_account
with the same effect::
>>> print(
- ... select(address_table.c.email_address).
- ... where(
- ... user_table.c.name == 'squidward',
- ... address_table.c.user_id == user_table.c.id
+ ... select(address_table.c.email_address).where(
+ ... user_table.c.name == "squidward",
+ ... address_table.c.user_id == user_table.c.id,
... )
... )
{opensql}SELECT address.email_address
>>> from sqlalchemy import and_, or_
>>> print(
- ... select(Address.email_address).
- ... where(
+ ... select(Address.email_address).where(
... and_(
- ... or_(User.name == 'squidward', User.name == 'sandy'),
- ... Address.user_id == User.id
+ ... or_(User.name == "squidward", User.name == "sandy"),
+ ... Address.user_id == User.id,
... )
... )
... )
arguments that match to column keys or ORM attribute names. It will filter
against the leftmost FROM clause or the last entity joined::
- >>> print(
- ... select(User).filter_by(name='spongebob', fullname='Spongebob Squarepants')
- ... )
+ >>> print(select(User).filter_by(name="spongebob", fullname="Spongebob Squarepants"))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
WHERE user_account.name = :name_1 AND user_account.fullname = :fullname_1
explicitly::
>>> print(
- ... select(user_table.c.name, address_table.c.email_address).
- ... join_from(user_table, address_table)
+ ... select(user_table.c.name, address_table.c.email_address).join_from(
+ ... user_table, address_table
+ ... )
... )
{opensql}SELECT user_account.name, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
The other is the :meth:`_sql.Select.join` method, which indicates only the
right side of the JOIN; the left-hand side is inferred::
- >>> print(
- ... select(user_table.c.name, address_table.c.email_address).
- ... join(address_table)
- ... )
+ >>> print(select(user_table.c.name, address_table.c.email_address).join(address_table))
{opensql}SELECT user_account.name, address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
clause and :meth:`_sql.Select.join` to establish ``address_table`` as
the second::
- >>> print(
- ... select(address_table.c.email_address).
- ... select_from(user_table).join(address_table)
- ... )
+ >>> print(select(address_table.c.email_address).select_from(user_table).join(address_table))
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
produce the SQL ``count()`` function::
>>> from sqlalchemy import func
- >>> print (
- ... select(func.count('*')).select_from(user_table)
- ... )
+ >>> print(select(func.count("*")).select_from(user_table))
{opensql}SELECT count(:count_2) AS count_1
FROM user_account
same SQL Expression mechanics as we saw earlier in :ref:`tutorial_select_where_clause`::
>>> print(
- ... select(address_table.c.email_address).
- ... select_from(user_table).
- ... join(address_table, user_table.c.id == address_table.c.user_id)
+ ... select(address_table.c.email_address)
+ ... .select_from(user_table)
+ ... .join(address_table, user_table.c.id == address_table.c.user_id)
... )
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
:paramref:`_sql.Select.join.full` which will render LEFT OUTER JOIN
and FULL OUTER JOIN, respectively::
- >>> print(
- ... select(user_table).join(address_table, isouter=True)
- ... )
+ >>> print(select(user_table).join(address_table, isouter=True))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account LEFT OUTER JOIN address ON user_account.id = address.user_id{stop}
- >>> print(
- ... select(user_table).join(address_table, full=True)
- ... )
+ >>> print(select(user_table).join(address_table, full=True))
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account FULL OUTER JOIN address ON user_account.id = address.user_id{stop}
>>> with engine.connect() as conn:
... result = conn.execute(
- ... select(User.name, func.count(Address.id).label("count")).
- ... join(Address).
- ... group_by(User.name).
- ... having(func.count(Address.id) > 1)
+ ... select(User.name, func.count(Address.id).label("count"))
+ ... .join(Address)
+ ... .group_by(User.name)
+ ... .having(func.count(Address.id) > 1)
... )
... print(result.all())
{opensql}BEGIN (implicit)
.. sourcecode:: pycon+sql
>>> from sqlalchemy import func, desc
- >>> stmt = select(
- ... Address.user_id,
- ... func.count(Address.id).label('num_addresses')).\
- ... group_by("user_id").order_by("user_id", desc("num_addresses"))
+ >>> stmt = (
+ ... select(Address.user_id, func.count(Address.id).label("num_addresses"))
+ ... .group_by("user_id")
+ ... .order_by("user_id", desc("num_addresses"))
+ ... )
>>> print(stmt)
{opensql}SELECT address.user_id, count(address.id) AS num_addresses
FROM address GROUP BY address.user_id ORDER BY address.user_id, num_addresses DESC
>>> user_alias_1 = user_table.alias()
>>> user_alias_2 = user_table.alias()
>>> print(
- ... select(user_alias_1.c.name, user_alias_2.c.name).
- ... join_from(user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id)
+ ... select(user_alias_1.c.name, user_alias_2.c.name).join_from(
+ ... user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id
+ ... )
... )
{opensql}SELECT user_account_1.name, user_account_2.name AS name_1
FROM user_account AS user_account_1
>>> address_alias_1 = aliased(Address)
>>> address_alias_2 = aliased(Address)
>>> print(
- ... select(User).
- ... join_from(User, address_alias_1).
- ... where(address_alias_1.email_address == 'patrick@aol.com').
- ... join_from(User, address_alias_2).
- ... where(address_alias_2.email_address == 'patrick@gmail.com')
+ ... select(User)
+ ... .join_from(User, address_alias_1)
+ ... .where(address_alias_1.email_address == "patrick@aol.com")
+ ... .join_from(User, address_alias_2)
+ ... .where(address_alias_2.email_address == "patrick@gmail.com")
... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account
of rows from the ``address`` table (aggregate functions and GROUP BY were
introduced previously at :ref:`tutorial_group_by_w_aggregates`):
- >>> subq = select(
- ... func.count(address_table.c.id).label("count"),
- ... address_table.c.user_id
- ... ).group_by(address_table.c.user_id).subquery()
+ >>> subq = (
+ ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id)
+ ... .group_by(address_table.c.user_id)
+ ... .subquery()
+ ... )
Stringifying the subquery by itself without it being embedded inside of another
:class:`_sql.Select` or other statement produces the plain SELECT statement
the object to a larger :class:`_sql.Select` that will join the data to
the ``user_account`` table::
- >>> stmt = select(
- ... user_table.c.name,
- ... user_table.c.fullname,
- ... subq.c.count
- ... ).join_from(user_table, subq)
+ >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from(
+ ... user_table, subq
+ ... )
>>> print(stmt)
{opensql}SELECT user_account.name, user_account.fullname, anon_1.count
element in the same way, but the SQL rendered is the very different common
table expression syntax::
- >>> subq = select(
- ... func.count(address_table.c.id).label("count"),
- ... address_table.c.user_id
- ... ).group_by(address_table.c.user_id).cte()
+ >>> subq = (
+ ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id)
+ ... .group_by(address_table.c.user_id)
+ ... .cte()
+ ... )
- >>> stmt = select(
- ... user_table.c.name,
- ... user_table.c.fullname,
- ... subq.c.count
- ... ).join_from(user_table, subq)
+ >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from(
+ ... user_table, subq
+ ... )
>>> print(stmt)
{opensql}WITH anon_1 AS
.. sourcecode:: python+sql
- >>> subq = select(Address).where(~Address.email_address.like('%@aol.com')).subquery()
+ >>> subq = select(Address).where(~Address.email_address.like("%@aol.com")).subquery()
>>> address_subq = aliased(Address, subq)
- >>> stmt = select(User, address_subq).join_from(User, address_subq).order_by(User.id, address_subq.id)
+ >>> stmt = (
+ ... select(User, address_subq)
+ ... .join_from(User, address_subq)
+ ... .order_by(User.id, address_subq.id)
+ ... )
>>> with Session(engine) as session:
... for user, address in session.execute(stmt):
... print(f"{user} {address}")
.. sourcecode:: python+sql
- >>> cte_obj = select(Address).where(~Address.email_address.like('%@aol.com')).cte()
+ >>> cte_obj = select(Address).where(~Address.email_address.like("%@aol.com")).cte()
>>> address_cte = aliased(Address, cte_obj)
- >>> stmt = select(User, address_cte).join_from(User, address_cte).order_by(User.id, address_cte.id)
+ >>> stmt = (
+ ... select(User, address_cte)
+ ... .join_from(User, address_cte)
+ ... .order_by(User.id, address_cte.id)
+ ... )
>>> with Session(engine) as session:
... for user, address in session.execute(stmt):
... print(f"{user} {address}")
method as below. Its default string form, when stringified by itself,
renders as an ordinary SELECT statement that selects from two tables::
- >>> subq = select(func.count(address_table.c.id)).\
- ... where(user_table.c.id == address_table.c.user_id).\
- ... scalar_subquery()
+ >>> subq = (
+ ... select(func.count(address_table.c.id))
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .scalar_subquery()
+ ... )
>>> print(subq)
{opensql}(SELECT count(address.id) AS count_1
FROM address, user_account
However, in the case where the correlation is ambiguous, SQLAlchemy will let
us know that more clarity is needed::
- >>> stmt = select(
- ... user_table.c.name,
- ... address_table.c.email_address,
- ... subq.label("address_count")
- ... ).\
- ... join_from(user_table, address_table).\
- ... order_by(user_table.c.id, address_table.c.id)
+ >>> stmt = (
+ ... select(
+ ... user_table.c.name,
+ ... address_table.c.email_address,
+ ... subq.label("address_count"),
+ ... )
+ ... .join_from(user_table, address_table)
+ ... .order_by(user_table.c.id, address_table.c.id)
+ ... )
>>> print(stmt)
Traceback (most recent call last):
...
this using the :meth:`_sql.ScalarSelect.correlate` or
:meth:`_sql.ScalarSelect.correlate_except` methods::
- >>> subq = select(func.count(address_table.c.id)).\
- ... where(user_table.c.id == address_table.c.user_id).\
- ... scalar_subquery().correlate(user_table)
+ >>> subq = (
+ ... select(func.count(address_table.c.id))
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .scalar_subquery()
+ ... .correlate(user_table)
+ ... )
The statement can then return the data for this column like any other:
... select(
... user_table.c.name,
... address_table.c.email_address,
- ... subq.label("address_count")
- ... ).
- ... join_from(user_table, address_table).
- ... order_by(user_table.c.id, address_table.c.id)
+ ... subq.label("address_count"),
+ ... )
+ ... .join_from(user_table, address_table)
+ ... .order_by(user_table.c.id, address_table.c.id)
... )
... print(result.all())
{opensql}BEGIN (implicit)
was discussed in the previous section::
>>> subq = (
- ... select(
- ... func.count(address_table.c.id).label("address_count"),
- ... address_table.c.email_address,
- ... address_table.c.user_id,
- ... ).
- ... where(user_table.c.id == address_table.c.user_id).
- ... lateral()
+ ... select(
+ ... func.count(address_table.c.id).label("address_count"),
+ ... address_table.c.email_address,
+ ... address_table.c.user_id,
+ ... )
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .lateral()
+ ... )
+ >>> stmt = (
+ ... select(user_table.c.name, subq.c.address_count, subq.c.email_address)
+ ... .join_from(user_table, subq)
+ ... .order_by(user_table.c.id, subq.c.email_address)
... )
- >>> stmt = select(
- ... user_table.c.name,
- ... subq.c.address_count,
- ... subq.c.email_address
- ... ).\
- ... join_from(user_table, subq).\
- ... order_by(user_table.c.id, subq.c.email_address)
>>> print(stmt)
{opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address
FROM user_account
:meth:`_engine.Connection.execute`::
>>> from sqlalchemy import union_all
- >>> stmt1 = select(user_table).where(user_table.c.name == 'sandy')
- >>> stmt2 = select(user_table).where(user_table.c.name == 'spongebob')
+ >>> stmt1 = select(user_table).where(user_table.c.name == "sandy")
+ >>> stmt2 = select(user_table).where(user_table.c.name == "spongebob")
>>> u = union_all(stmt1, stmt2)
>>> with engine.connect() as conn:
... result = conn.execute(u)
>>> u_subq = u.subquery()
>>> stmt = (
- ... select(u_subq.c.name, address_table.c.email_address).
- ... join_from(address_table, u_subq).
- ... order_by(u_subq.c.name, address_table.c.email_address)
+ ... select(u_subq.c.name, address_table.c.email_address)
+ ... .join_from(address_table, u_subq)
+ ... .order_by(u_subq.c.name, address_table.c.email_address)
... )
>>> with engine.connect() as conn:
... result = conn.execute(stmt)
execute; this statement should be composed against the target
ORM entities or their underlying mapped :class:`_schema.Table` objects::
- >>> stmt1 = select(User).where(User.name == 'sandy')
- >>> stmt2 = select(User).where(User.name == 'spongebob')
+ >>> stmt1 = select(User).where(User.name == "sandy")
+ >>> stmt2 = select(User).where(User.name == "spongebob")
>>> u = union_all(stmt1, stmt2)
For a simple SELECT with UNION that is not already nested inside of a
.. sourcecode:: pycon+sql
>>> subq = (
- ... select(func.count(address_table.c.id)).
- ... where(user_table.c.id == address_table.c.user_id).
- ... group_by(address_table.c.user_id).
- ... having(func.count(address_table.c.id) > 1)
+ ... select(func.count(address_table.c.id))
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .group_by(address_table.c.user_id)
+ ... .having(func.count(address_table.c.id) > 1)
... ).exists()
>>> with engine.connect() as conn:
- ... result = conn.execute(
- ... select(user_table.c.name).where(subq)
- ... )
+ ... result = conn.execute(select(user_table.c.name).where(subq))
... print(result.all())
{opensql}BEGIN (implicit)
SELECT user_account.name
.. sourcecode:: pycon+sql
>>> subq = (
- ... select(address_table.c.id).
- ... where(user_table.c.id == address_table.c.user_id)
+ ... select(address_table.c.id).where(user_table.c.id == address_table.c.user_id)
... ).exists()
>>> with engine.connect() as conn:
- ... result = conn.execute(
- ... select(user_table.c.name).where(~subq)
- ... )
+ ... result = conn.execute(select(user_table.c.name).where(~subq))
... print(result.all())
{opensql}BEGIN (implicit)
SELECT user_account.name
.. sourcecode:: pycon+sql
- >>> stmt = select(
- ... func.row_number().over(partition_by=user_table.c.name),
- ... user_table.c.name,
- ... address_table.c.email_address
- ... ).select_from(user_table).join(address_table)
+ >>> stmt = (
+ ... select(
+ ... func.row_number().over(partition_by=user_table.c.name),
+ ... user_table.c.name,
+ ... address_table.c.email_address,
+ ... )
+ ... .select_from(user_table)
+ ... .join(address_table)
+ ... )
>>> with engine.connect() as conn: # doctest:+SKIP
... result = conn.execute(stmt)
... print(result.all())
.. sourcecode:: pycon+sql
- >>> stmt = select(
- ... func.count().over(order_by=user_table.c.name),
- ... user_table.c.name,
- ... address_table.c.email_address).select_from(user_table).join(address_table)
+ >>> stmt = (
+ ... select(
+ ... func.count().over(order_by=user_table.c.name),
+ ... user_table.c.name,
+ ... address_table.c.email_address,
+ ... )
+ ... .select_from(user_table)
+ ... .join(address_table)
+ ... )
>>> with engine.connect() as conn: # doctest:+SKIP
... result = conn.execute(stmt)
... print(result.all())
>>> print(
... func.unnest(
- ... func.percentile_disc([0.25,0.5,0.75,1]).within_group(user_table.c.name)
+ ... func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(user_table.c.name)
... )
... )
unnest(percentile_disc(:percentile_disc_1) WITHIN GROUP (ORDER BY user_account.name))
particular subset of rows compared to the total range of rows returned, available
using the :meth:`_functions.FunctionElement.filter` method::
- >>> stmt = select(
- ... func.count(address_table.c.email_address).filter(user_table.c.name == 'sandy'),
- ... func.count(address_table.c.email_address).filter(user_table.c.name == 'spongebob')
- ... ).select_from(user_table).join(address_table)
+ >>> stmt = (
+ ... select(
+ ... func.count(address_table.c.email_address).filter(user_table.c.name == "sandy"),
+ ... func.count(address_table.c.email_address).filter(
+ ... user_table.c.name == "spongebob"
+ ... ),
+ ... )
+ ... .select_from(user_table)
+ ... .join(address_table)
+ ... )
>>> with engine.connect() as conn: # doctest:+SKIP
... result = conn.execute(stmt)
... print(result.all())
>>> from sqlalchemy import JSON
>>> from sqlalchemy import type_coerce
>>> from sqlalchemy.dialects import mysql
- >>> s = select(
- ... type_coerce(
- ... {'some_key': {'foo': 'bar'}}, JSON
- ... )['some_key']
- ... )
+ >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"])
>>> print(s.compile(dialect=mysql.dialect()))
SELECT JSON_EXTRACT(%s, %s) AS anon_1
>>> from sqlalchemy import update
>>> stmt = (
- ... update(user_table).where(user_table.c.name == 'patrick').
- ... values(fullname='Patrick the Star')
+ ... update(user_table)
+ ... .where(user_table.c.name == "patrick")
+ ... .values(fullname="Patrick the Star")
... )
>>> print(stmt)
{opensql}UPDATE user_account SET fullname=:fullname WHERE user_account.name = :name_1
UPDATE supports all the major SQL forms of UPDATE, including updates against expressions,
where we can make use of :class:`_schema.Column` expressions::
- >>> stmt = (
- ... update(user_table).
- ... values(fullname="Username: " + user_table.c.name)
- ... )
+ >>> stmt = update(user_table).values(fullname="Username: " + user_table.c.name)
>>> print(stmt)
{opensql}UPDATE user_account SET fullname=(:name_1 || user_account.name)
>>> from sqlalchemy import bindparam
>>> stmt = (
- ... update(user_table).
- ... where(user_table.c.name == bindparam('oldname')).
- ... values(name=bindparam('newname'))
+ ... update(user_table)
+ ... .where(user_table.c.name == bindparam("oldname"))
+ ... .values(name=bindparam("newname"))
... )
>>> with engine.begin() as conn:
- ... conn.execute(
- ... stmt,
- ... [
- ... {'oldname':'jack', 'newname':'ed'},
- ... {'oldname':'wendy', 'newname':'mary'},
- ... {'oldname':'jim', 'newname':'jake'},
- ... ]
- ... )
+ ... conn.execute(
+ ... stmt,
+ ... [
+ ... {"oldname": "jack", "newname": "ed"},
+ ... {"oldname": "wendy", "newname": "mary"},
+ ... {"oldname": "jim", "newname": "jake"},
+ ... ],
+ ... )
{opensql}BEGIN (implicit)
UPDATE user_account SET name=? WHERE user_account.name = ?
[...] [('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')]
anywhere a column expression might be placed::
>>> scalar_subq = (
- ... select(address_table.c.email_address).
- ... where(address_table.c.user_id == user_table.c.id).
- ... order_by(address_table.c.id).
- ... limit(1).
- ... scalar_subquery()
+ ... select(address_table.c.email_address)
+ ... .where(address_table.c.user_id == user_table.c.id)
+ ... .order_by(address_table.c.id)
+ ... .limit(1)
+ ... .scalar_subquery()
... )
>>> update_stmt = update(user_table).values(fullname=scalar_subq)
>>> print(update_stmt)
WHERE clause of the statement::
>>> update_stmt = (
- ... update(user_table).
- ... where(user_table.c.id == address_table.c.user_id).
- ... where(address_table.c.email_address == 'patrick@aol.com').
- ... values(fullname='Pat')
- ... )
+ ... update(user_table)
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .where(address_table.c.email_address == "patrick@aol.com")
+ ... .values(fullname="Pat")
+ ... )
>>> print(update_stmt)
{opensql}UPDATE user_account SET fullname=:fullname FROM address
WHERE user_account.id = address.user_id AND address.email_address = :email_address_1
order to refer to additional tables::
>>> update_stmt = (
- ... update(user_table).
- ... where(user_table.c.id == address_table.c.user_id).
- ... where(address_table.c.email_address == 'patrick@aol.com').
- ... values(
- ... {
- ... user_table.c.fullname: "Pat",
- ... address_table.c.email_address: "pat@aol.com"
- ... }
- ... )
- ... )
+ ... update(user_table)
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .where(address_table.c.email_address == "patrick@aol.com")
+ ... .values(
+ ... {
+ ... user_table.c.fullname: "Pat",
+ ... address_table.c.email_address: "pat@aol.com",
+ ... }
+ ... )
+ ... )
>>> from sqlalchemy.dialects import mysql
>>> print(update_stmt.compile(dialect=mysql.dialect()))
{opensql}UPDATE user_account, address
case, the :meth:`_sql.Update.ordered_values` method accepts a sequence of
tuples so that this order may be controlled [2]_::
- >>> update_stmt = (
- ... update(some_table).
- ... ordered_values(
- ... (some_table.c.y, 20),
- ... (some_table.c.x, some_table.c.y + 10)
- ... )
+ >>> update_stmt = update(some_table).ordered_values(
+ ... (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)
... )
>>> print(update_stmt)
{opensql}UPDATE some_table SET y=:y, x=(some_table.y + :y_1)
::
>>> from sqlalchemy import delete
- >>> stmt = delete(user_table).where(user_table.c.name == 'patrick')
+ >>> stmt = delete(user_table).where(user_table.c.name == "patrick")
>>> print(stmt)
{opensql}DELETE FROM user_account WHERE user_account.name = :name_1
syntaxes, such as ``DELETE FROM..USING`` on MySQL::
>>> delete_stmt = (
- ... delete(user_table).
- ... where(user_table.c.id == address_table.c.user_id).
- ... where(address_table.c.email_address == 'patrick@aol.com')
- ... )
+ ... delete(user_table)
+ ... .where(user_table.c.id == address_table.c.user_id)
+ ... .where(address_table.c.email_address == "patrick@aol.com")
+ ... )
>>> from sqlalchemy.dialects import mysql
>>> print(delete_stmt.compile(dialect=mysql.dialect()))
{opensql}DELETE FROM user_account USING user_account, address
>>> with engine.begin() as conn:
... result = conn.execute(
- ... update(user_table).
- ... values(fullname="Patrick McStar").
- ... where(user_table.c.name == 'patrick')
+ ... update(user_table)
+ ... .values(fullname="Patrick McStar")
+ ... .where(user_table.c.name == "patrick")
... )
... print(result.rowcount)
{opensql}BEGIN (implicit)
>>> update_stmt = (
- ... update(user_table).where(user_table.c.name == 'patrick').
- ... values(fullname='Patrick the Star').
- ... returning(user_table.c.id, user_table.c.name)
+ ... update(user_table)
+ ... .where(user_table.c.name == "patrick")
+ ... .values(fullname="Patrick the Star")
+ ... .returning(user_table.c.id, user_table.c.name)
... )
>>> print(update_stmt)
{opensql}UPDATE user_account SET fullname=:fullname
RETURNING user_account.id, user_account.name{stop}
>>> delete_stmt = (
- ... delete(user_table).where(user_table.c.name == 'patrick').
- ... returning(user_table.c.id, user_table.c.name)
+ ... delete(user_table)
+ ... .where(user_table.c.name == "patrick")
+ ... .returning(user_table.c.id, user_table.c.name)
... )
>>> print(delete_stmt)
{opensql}DELETE FROM user_account
... conn.execute(text("CREATE TABLE some_table (x int, y int)"))
... conn.execute(
... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
- ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}]
+ ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}],
... )
... conn.commit()
{opensql}BEGIN (implicit)
>>> with engine.begin() as conn:
... conn.execute(
... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
- ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}]
+ ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}],
... )
{opensql}BEGIN (implicit)
INSERT INTO some_table (x, y) VALUES (?, ?)
.. sourcecode:: pycon+sql
>>> with engine.connect() as conn:
- ... result = conn.execute(
- ... text("SELECT x, y FROM some_table WHERE y > :y"),
- ... {"y": 2}
- ... )
+ ... result = conn.execute(text("SELECT x, y FROM some_table WHERE y > :y"), {"y": 2})
... for row in result:
- ... print(f"x: {row.x} y: {row.y}")
+ ... print(f"x: {row.x} y: {row.y}")
{opensql}BEGIN (implicit)
SELECT x, y FROM some_table WHERE y > ?
[...] (2,)
>>> with engine.connect() as conn:
... conn.execute(
... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"),
- ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}]
+ ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}],
... )
... conn.commit()
{opensql}BEGIN (implicit)
>>> with Session(engine) as session:
... result = session.execute(stmt, {"y": 6})
... for row in result:
- ... print(f"x: {row.x} y: {row.y}")
+ ... print(f"x: {row.x} y: {row.y}")
{opensql}BEGIN (implicit)
SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y
[...] (6,){stop}
>>> with Session(engine) as session:
... result = session.execute(
... text("UPDATE some_table SET y=:y WHERE x=:x"),
- ... [{"x": 9, "y":11}, {"x": 13, "y": 15}]
+ ... [{"x": 9, "y": 11}, {"x": 13, "y": 15}],
... )
... session.commit()
{opensql}BEGIN (implicit)
>>> user_table = Table(
... "user_account",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('name', String(30)),
- ... Column('fullname', String)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("name", String(30)),
+ ... Column("fullname", String),
... )
We can observe that the above :class:`_schema.Table` construct looks a lot like
>>> address_table = Table(
... "address",
... metadata_obj,
- ... Column('id', Integer, primary_key=True),
- ... Column('user_id', ForeignKey('user_account.id'), nullable=False),
- ... Column('email_address', String, nullable=False)
+ ... Column("id", Integer, primary_key=True),
+ ... Column("user_id", ForeignKey("user_account.id"), nullable=False),
+ ... Column("email_address", String, nullable=False),
... )
The table above also features a third kind of constraint, which in SQL is the
>>> from sqlalchemy.orm import relationship
>>> class User(Base):
- ... __tablename__ = 'user_account'
- ...
+ ... __tablename__ = "user_account"
+ ...
... id: Mapped[int] = mapped_column(primary_key=True)
... name: Mapped[str] = mapped_column(String(30))
... fullname: Mapped[Optional[str]]
- ...
+ ...
... addresses: Mapped[List["Address"]] = relationship(back_populates="user")
- ...
+ ...
... def __repr__(self) -> str:
- ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
+ ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})"
>>> class Address(Base):
- ... __tablename__ = 'address'
- ...
+ ... __tablename__ = "address"
+ ...
... id: Mapped[int] = mapped_column(primary_key=True)
... email_address: Mapped[str]
- ... user_id = mapped_column(ForeignKey('user_account.id'))
- ...
+ ... user_id = mapped_column(ForeignKey("user_account.id"))
+ ...
... user: Mapped[User] = relationship(back_populates="addresses")
- ...
+ ...
... def __repr__(self) -> str:
... return f"Address(id={self.id!r}, email_address={self.email_address!r})"
Base.metadata.create_all(engine)
-
Combining Core Table Declarations with ORM Declarative
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class Base(DeclarativeBase):
pass
+
class User(Base):
__table__ = user_table
def __repr__(self):
return f"User({self.name!r}, {self.fullname!r})"
+
class Address(Base):
__table__ = address_table
.. sourcecode:: pycon+sql
- >>> sandy_fullname = session.execute(
- ... select(User.fullname).where(User.id == 2)
- ... ).scalar_one()
+ >>> sandy_fullname = session.execute(select(User.fullname).where(User.id == 2)).scalar_one()
{opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ?
[...] ('Sandy Squirrel', 2)
SELECT user_account.fullname
.. sourcecode:: python
- from sqlalchemy.orm import Mapped
- from sqlalchemy.orm import relationship
+ from sqlalchemy.orm import Mapped
+ from sqlalchemy.orm import relationship
- class User(Base):
- __tablename__ = 'user_account'
- # ... mapped_column() mappings
+ class User(Base):
+ __tablename__ = "user_account"
- addresses: Mapped[list["Address"]] = relationship(back_populates="user")
+ # ... mapped_column() mappings
+ addresses: Mapped[list["Address"]] = relationship(back_populates="user")
- class Address(Base):
- __tablename__ = 'address'
- # ... mapped_column() mappings
+ class Address(Base):
+ __tablename__ = "address"
- user: Mapped["User"] = relationship(back_populates="addresses")
+ # ... mapped_column() mappings
+ user: Mapped["User"] = relationship(back_populates="addresses")
Above, the ``User`` class now has an attribute ``User.addresses`` and the
``Address`` class has an attribute ``Address.user``. The
of objects. If we make a new ``User`` object, we can note that there is a
Python list when we access the ``.addresses`` element::
- >>> u1 = User(name='pkrabs', fullname='Pearl Krabs')
+ >>> u1 = User(name="pkrabs", fullname="Pearl Krabs")
>>> u1.addresses
[]
argument** to :meth:`_sql.Select.join`, where it serves to indicate both the
right side of the join as well as the ON clause at once::
- >>> print(
- ... select(Address.email_address).
- ... select_from(User).
- ... join(User.addresses)
- ... )
+ >>> print(select(Address.email_address).select_from(User).join(User.addresses))
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
between the two mapped :class:`_schema.Table` objects, not because of the
:func:`_orm.relationship` objects on the ``User`` and ``Address`` classes::
- >>> print(
- ... select(Address.email_address).
- ... join_from(User, Address)
- ... )
+ >>> print(select(Address.email_address).join_from(User, Address))
{opensql}SELECT address.email_address
FROM user_account JOIN address ON user_account.id = address.user_id
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import relationship
+
class User(Base):
- __tablename__ = 'user_account'
+ __tablename__ = "user_account"
- addresses: Mapped[list["Address"]] = relationship(back_populates="user", lazy="selectin")
+ addresses: Mapped[list["Address"]] = relationship(
+ back_populates="user", lazy="selectin"
+ )
Each loader strategy object adds some kind of information to the statement that
will be used later by the :class:`_orm.Session` when it is deciding how various
.. sourcecode:: pycon+sql
>>> from sqlalchemy.orm import selectinload
- >>> stmt = (
- ... select(User).options(selectinload(User.addresses)).order_by(User.id)
- ... )
+ >>> stmt = select(User).options(selectinload(User.addresses)).order_by(User.id)
>>> for row in session.execute(stmt):
- ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})")
+ ... print(
+ ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})"
+ ... )
{opensql}SELECT user_account.id, user_account.name, user_account.fullname
FROM user_account ORDER BY user_account.id
[...] ()
>>> from sqlalchemy.orm import joinedload
>>> stmt = (
- ... select(Address).options(joinedload(Address.user, innerjoin=True)).order_by(Address.id)
+ ... select(Address)
+ ... .options(joinedload(Address.user, innerjoin=True))
+ ... .order_by(Address.id)
... )
>>> for row in session.execute(stmt):
... print(f"{row.Address.email_address} {row.Address.user.name}")
>>> from sqlalchemy.orm import contains_eager
>>> stmt = (
- ... select(Address).
- ... join(Address.user).
- ... where(User.name == 'pkrabs').
- ... options(contains_eager(Address.user)).order_by(Address.id)
+ ... select(Address)
+ ... .join(Address.user)
+ ... .where(User.name == "pkrabs")
+ ... .options(contains_eager(Address.user))
+ ... .order_by(Address.id)
... )
>>> for row in session.execute(stmt):
... print(f"{row.Address.email_address} {row.Address.user.name}")
SQL query that unnecessarily joins twice::
>>> stmt = (
- ... select(Address).
- ... join(Address.user).
- ... where(User.name == 'pkrabs').
- ... options(joinedload(Address.user)).order_by(Address.id)
+ ... select(Address)
+ ... .join(Address.user)
+ ... .where(User.name == "pkrabs")
+ ... .options(joinedload(Address.user))
+ ... .order_by(Address.id)
... )
>>> print(stmt) # SELECT has a JOIN and LEFT OUTER JOIN unnecessarily
{opensql}SELECT address.id, address.email_address, address.user_id,
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import relationship
+
class User(Base):
- __tablename__ = 'user_account'
+ __tablename__ = "user_account"
# ... mapped_column() mappings
- addresses: Mapped[list["Address"]] = relationship(back_populates="user", lazy="raise_on_sql")
+ addresses: Mapped[list["Address"]] = relationship(
+ back_populates="user", lazy="raise_on_sql"
+ )
class Address(Base):
- __tablename__ = 'address'
+ __tablename__ = "address"
# ... mapped_column() mappings
user: Mapped["User"] = relationship(back_populates="addresses", lazy="raise_on_sql")
-
Using such a mapping, the application is blocked from lazy loading,
indicating that a particular query would need to specify a loader strategy:
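A minimal sketch of the failure mode (assuming the mapping above; the exact
exception text may vary by version)::

    >>> u1 = session.execute(select(User)).scalars().first()
    >>> u1.addresses
    Traceback (most recent call last):
    ...
    sqlalchemy.exc.InvalidRequestError: 'User.addresses' is not available due to lazy='raise_on_sql'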
--- /dev/null
+from argparse import ArgumentParser
+from argparse import RawDescriptionHelpFormatter
+from collections.abc import Iterator
+from pathlib import Path
+import re
+
+from black import DEFAULT_LINE_LENGTH
+from black import format_str
+from black import Mode
+from black import parse_pyproject_toml
+from black import TargetVersion
+
+
+home = Path(__file__).parent.parent
+
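+# a block is a list of tuples of the form:
+# (original line, line number, indentation padding or None, extracted code)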
+_Block = list[tuple[str, int, str | None, str]]
+
+
+def _format_block(
+ input_block: _Block, exit_on_error: bool, is_doctest: bool
+) -> list[str]:
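+    # join the block's code, run it through black, then re-apply the
+    # original padding (and `>>> ` / `... ` prefixes for doctests);
+    # on a formatting error the original lines are returned unchanged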
+ code = "\n".join(c for *_, c in input_block)
+ try:
+ formatted = format_str(code, mode=BLACK_MODE)
+ except Exception as e:
+ if is_doctest:
+ start_line = input_block[0][1]
+ print(
+ "Could not format code block starting at "
+ f"line {start_line}:\n{code}\nError: {e}"
+ )
+ if exit_on_error:
+ print("Exiting since --exit-on-error was passed")
+ raise
+ else:
+ print("Ignoring error")
+ elif VERBOSE:
+ start_line = input_block[0][1]
+ print(
+ "Could not format code block starting at "
+ f"line {start_line}:\n---\n{code}\n---Error: {e}"
+ )
+ return [line for line, *_ in input_block]
+ else:
+ formatted_code_lines = formatted.splitlines()
+ padding = input_block[0][2]
+ if is_doctest:
+ formatted_lines = [
+ f"{padding}>>> {formatted_code_lines[0]}",
+ *(f"{padding}... {fcl}" for fcl in formatted_code_lines[1:]),
+ ]
+ else:
+            # The first line may have additional padding.
+            # If it does, restore it.
+            additional_padding = re.match(
+                r"^(\s*)[^ ]?", input_block[0][3]
+            ).groups()[0]
+            formatted_lines = [
+                f"{padding}{additional_padding}{fcl}" if fcl else fcl
+                for fcl in formatted_code_lines
+            ]
+ if not input_block[-1][0] and formatted_lines[-1]:
+            # the last line was empty and black removed it; restore it
+ formatted_lines.append("")
+ return formatted_lines
+
+
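+# doctest_code_start: an indented `>>> ` line that opens a doctest block
+# doctest_code_continue: a `... ` continuation line inside a doctest block
+# plain_indent: a line indented by at least 4 spaces, i.e. a plain code block
+# format_directive: the `.. format: on|off` comment that toggles formatting
+# dont_format_under_directive: directives (currently toctree) whose indented
+# body must not be formatted as code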
+doctest_code_start = re.compile(r"^(\s+)>>>\s?(.+)")
+doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)")
+plain_indent = re.compile(r"^(\s{4})(\s*[^: ].*)")
+format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$")
+dont_format_under_directive = re.compile(r"^\.\. (?:toctree)::\s*$")
+
+
+def format_file(
+ file: Path, exit_on_error: bool, check: bool, no_plain: bool
+) -> bool | None:
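+    # in check mode, returns whether the file is already correctly
+    # formatted; otherwise writes any changes back and returns None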
+ buffer = []
+ if not check:
+ print(f"Running file {file} ..", end="")
+ original = file.read_text("utf-8")
+ doctest_block: _Block | None = None
+ plain_block: _Block | None = None
+ last_line = None
+ disable_format = False
+ non_code_directive = False
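+    # scan the file line by line, accumulating doctest and plain indented
+    # blocks, and format each block as soon as it ends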
+ for line_no, line in enumerate(original.splitlines(), 1):
+ if match := format_directive.match(line):
+ disable_format = match.groups()[0] == "off"
+ elif match := dont_format_under_directive.match(line):
+ non_code_directive = True
+
+ if doctest_block:
+ assert not plain_block
+ if match := doctest_code_continue.match(line):
+ doctest_block.append((line, line_no, None, match.groups()[0]))
+ continue
+ else:
+ buffer.extend(
+ _format_block(
+ doctest_block, exit_on_error, is_doctest=True
+ )
+ )
+ doctest_block = None
+
+ if plain_block:
+ assert not doctest_block
+ if not line:
+ plain_block.append((line, line_no, None, line))
+ continue
+ elif match := plain_indent.match(line):
+ plain_block.append((line, line_no, None, match.groups()[1]))
+ continue
+ else:
+ if non_code_directive:
+ buffer.extend(line for line, _, _, _ in plain_block)
+ else:
+ buffer.extend(
+ _format_block(
+ plain_block, exit_on_error, is_doctest=False
+ )
+ )
+ plain_block = None
+ non_code_directive = False
+
+ if match := doctest_code_start.match(line):
+ if plain_block:
+ buffer.extend(
+ _format_block(plain_block, exit_on_error, is_doctest=False)
+ )
+ plain_block = None
+ padding, code = match.groups()
+ doctest_block = [(line, line_no, padding, code)]
+ elif (
+ not no_plain
+ and not disable_format
+ and not last_line
+ and (match := plain_indent.match(line))
+ ):
+ assert not doctest_block
+ # start of a plain block
+ padding, code = match.groups()
+ plain_block = [(line, line_no, padding, code)]
+ else:
+ buffer.append(line)
+ last_line = line
+
+ if doctest_block:
+ buffer.extend(
+ _format_block(doctest_block, exit_on_error, is_doctest=True)
+ )
+ if plain_block:
+ if non_code_directive:
+ buffer.extend(line for line, _, _, _ in plain_block)
+ else:
+ buffer.extend(
+ _format_block(plain_block, exit_on_error, is_doctest=False)
+ )
+ if buffer:
+        # if the buffer is empty something strange happened, so don't write
+        # anything; appending "" restores the file's trailing newline
+ buffer.append("")
+ updated = "\n".join(buffer)
+ equal = original == updated
+ if not check:
+ print("..done. ", "No changes" if equal else "Changes detected")
+ if not equal:
+ # write only if there are changes to write
+ file.write_text(updated, "utf-8", newline="\n")
+ else:
+ if not check:
+ print(".. Nothing to write")
+        # nothing was parsed; the file counts as unchanged only if it was empty
+        equal = not original
+
+ if check:
+ if not equal:
+ print(f"File {file} would be formatted")
+ return equal
+ else:
+ return None
+
+
+def iter_files(directory: str) -> Iterator[Path]:
+ yield from (home / directory).glob("./**/*.rst")
+
+
+def main(
+ file: str | None,
+ directory: str,
+ exit_on_error: bool,
+ check: bool,
+ no_plain: bool,
+):
+ if file is not None:
+ result = [format_file(Path(file), exit_on_error, check, no_plain)]
+ else:
+ result = [
+ format_file(doc, exit_on_error, check, no_plain)
+ for doc in iter_files(directory)
+ ]
+
+ if check:
+ if all(result):
+ print("All files are correctly formatted")
+ exit(0)
+ else:
+ print("Some file would be reformated")
+ exit(1)
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(
+ description="""Formats code inside docs using black. Supports \
+doctest code blocks and also tries to format plain code block identifies as \
+all indented blocks of at least 4 spaces, unless '--no-plain' is specified.
+
+Plain code block may lead to false positive. To disable formatting on a \
+file section the comment ``.. format: off`` disables formatting until \
+``.. format: on`` is encountered or the file ends.
+Another alterative is to use less than 4 spaces to indent the code block.
+""",
+ formatter_class=RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "-f", "--file", help="Format only this file instead of all docs"
+ )
+ parser.add_argument(
+ "-d",
+ "--directory",
+ help="Find documents in this directory and its sub dirs",
+ default="doc/build",
+ )
+ parser.add_argument(
+ "-c",
+ "--check",
+ help="Don't write the files back, just return the "
+ "status. Return code 0 means nothing would change. "
+ "Return code 1 means some files would be reformatted.",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-e",
+ "--exit-on-error",
+ help="Exit in case of black format error instead of ignoring it. "
+ "This option is only valid for doctest code blocks",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-l",
+ "--project-line-length",
+ help="Configure the line length to the project value instead "
+ "of using the black default of 88",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ help="Increase verbosity",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-n",
+ "--no-plain",
+ help="Disable plain code blocks formatting that's more difficult "
+ "to parse compared to doctest code blocks",
+ action="store_true",
+ )
+ args = parser.parse_args()
+
+ config = parse_pyproject_toml(home / "pyproject.toml")
+ BLACK_MODE = Mode(
+ target_versions=set(
+ TargetVersion[val.upper()]
+ for val in config.get("target_version", [])
+ ),
+ line_length=config.get("line_length", DEFAULT_LINE_LENGTH)
+ if args.project_line_length
+ else DEFAULT_LINE_LENGTH,
+ )
+ VERBOSE = args.verbose
+
+ main(
+ args.file,
+ args.directory,
+ args.exit_on_error,
+ args.check,
+ args.no_plain,
+ )
# test with cython and without cython exts running
slotscheck -m sqlalchemy
env DISABLE_SQLALCHEMY_CEXT_RUNTIME=1 slotscheck -m sqlalchemy
+ python ./tools/format_docs_code.py --check
# "pep8" env was renamed to "lint".